##// END OF EJS Templates
context: extract partial nodeid lookup method to scmutil...
Martin von Zweigbergk -
r37522:901e749c default
parent child Browse files
Show More
@@ -1,2570 +1,2570 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import re
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 addednodeid,
19 19 bin,
20 20 hex,
21 21 modifiednodeid,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 wdirfilenodeids,
26 26 wdirid,
27 27 wdirrev,
28 28 )
29 29 from . import (
30 30 dagop,
31 31 encoding,
32 32 error,
33 33 fileset,
34 34 match as matchmod,
35 35 obsolete as obsmod,
36 36 patch,
37 37 pathutil,
38 38 phases,
39 39 pycompat,
40 40 repoview,
41 41 revlog,
42 42 scmutil,
43 43 sparse,
44 44 subrepo,
45 45 subrepoutil,
46 46 util,
47 47 )
48 48 from .utils import (
49 49 dateutil,
50 50 stringutil,
51 51 )
52 52
53 53 propertycache = util.propertycache
54 54
55 55 nonascii = re.compile(br'[^\x21-\x7f]').search
56 56
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed.

    Subclasses are responsible for providing ``_rev``, ``_node``,
    ``_manifest`` and ``_parents``; this base class only assumes they
    exist."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        # short hex form of the node, e.g. b'1234567890ab'
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            # same concrete type and same revision number; contexts without
            # a _rev (e.g. partially-initialized ones) compare unequal
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # membership means "file tracked in this context's manifest"
        return key in self._manifest

    def __getitem__(self, key):
        # ctx[path] -> filectx for that path in this context
        return self.filectx(key)

    def __iter__(self):
        # iterate over tracked file names
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        # deleted/unknown/ignored come straight from the dirstate scan in `s`
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                # only present when listclean is set
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # parsed .hgsub/.hgsubstate mapping for this context (cached)
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    # simple accessors delegating to state computed by subclasses
    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        # anything above 'public' (draft, secret) may still be rewritten
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete but its ancestors are"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        # no second parent: report the null revision
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        """return (filenode, flags) for path, raising ManifestLookupError
        when the file is not tracked in this context.

        Tries the cheapest available source first: a fully-loaded manifest,
        then the manifest delta, then a targeted manifest lookup."""
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # missing files simply have no flags
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """build a matcher for this context's repo from pattern arguments"""
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            # default to diffing against the first parent
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                # merge subrepo results into the outer result, prefixing
                # file names with the subrepo path
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
379 379
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid='.'):
        """changeid is a revision number, node, or tag"""
        super(changectx, self).__init__(repo)

        # Resolution strategies are tried cheapest-first; each successful
        # branch sets self._node and self._rev then returns.  Falling off
        # the end of the try block means the lookup failed.
        try:
            # 1. integer revision number
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            # 2. well-known symbolic names
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if (changeid == '.'
                or repo.local() and changeid == repo.dirstate.p1()):
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            # 3. binary (20-byte) nodeid
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredLookupError:
                    raise
                except LookupError:
                    pass

            # 4. string spelling an integer revision (possibly negative,
            # counting back from tip)
            try:
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            # 5. full 40-char hex nodeid
            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # 6. lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass

            # 7. unambiguous partial hex nodeid (returns None if no match)
            self._node = scmutil.resolvepartialhexnodeid(repo, changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if (repo.local()
                and changeid in repo.unfiltered().dirstate.parents()):
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # make the error message legible for binary nodeids
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            # hidden-changeset errors carry extra information; re-raise
            raise
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # false only for the null revision
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        # parsed changelog entry for this revision (cached)
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            # single-parent changeset: don't materialize the null parent
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        """return the raw changeset fields as a 6-tuple"""
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    # accessors over the parsed changelog entry
    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched: use the revlog's pick
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
642 642
643 643 class basefilectx(object):
644 644 """A filecontext object represents the common logic for its children:
645 645 filectx: read-only access to a filerevision that is already present
646 646 in the repo,
647 647 workingfilectx: a filecontext that represents files from the working
648 648 directory,
649 649 memfilectx: a filecontext that represents files in-memory,
650 650 overlayfilectx: duplicate another filecontext with some fields overridden.
651 651 """
652 652 @propertycache
653 653 def _filelog(self):
654 654 return self._repo.file(self._path)
655 655
656 656 @propertycache
657 657 def _changeid(self):
658 658 if r'_changeid' in self.__dict__:
659 659 return self._changeid
660 660 elif r'_changectx' in self.__dict__:
661 661 return self._changectx.rev()
662 662 elif r'_descendantrev' in self.__dict__:
663 663 # this file context was created from a revision with a known
664 664 # descendant, we can (lazily) correct for linkrev aliases
665 665 return self._adjustlinkrev(self._descendantrev)
666 666 else:
667 667 return self._filelog.linkrev(self._filerev)
668 668
669 669 @propertycache
670 670 def _filenode(self):
671 671 if r'_fileid' in self.__dict__:
672 672 return self._filelog.lookup(self._fileid)
673 673 else:
674 674 return self._changectx.filenode(self._path)
675 675
676 676 @propertycache
677 677 def _filerev(self):
678 678 return self._filelog.rev(self._filenode)
679 679
680 680 @propertycache
681 681 def _repopath(self):
682 682 return self._path
683 683
684 684 def __nonzero__(self):
685 685 try:
686 686 self._filenode
687 687 return True
688 688 except error.LookupError:
689 689 # file is missing
690 690 return False
691 691
692 692 __bool__ = __nonzero__
693 693
694 694 def __bytes__(self):
695 695 try:
696 696 return "%s@%s" % (self.path(), self._changectx)
697 697 except error.LookupError:
698 698 return "%s@???" % self.path()
699 699
700 700 __str__ = encoding.strmethod(__bytes__)
701 701
702 702 def __repr__(self):
703 703 return r"<%s %s>" % (type(self).__name__, str(self))
704 704
705 705 def __hash__(self):
706 706 try:
707 707 return hash((self._path, self._filenode))
708 708 except AttributeError:
709 709 return id(self)
710 710
711 711 def __eq__(self, other):
712 712 try:
713 713 return (type(self) == type(other) and self._path == other._path
714 714 and self._filenode == other._filenode)
715 715 except AttributeError:
716 716 return False
717 717
718 718 def __ne__(self, other):
719 719 return not (self == other)
720 720
721 721 def filerev(self):
722 722 return self._filerev
723 723 def filenode(self):
724 724 return self._filenode
725 725 @propertycache
726 726 def _flags(self):
727 727 return self._changectx.flags(self._path)
728 728 def flags(self):
729 729 return self._flags
730 730 def filelog(self):
731 731 return self._filelog
732 732 def rev(self):
733 733 return self._changeid
734 734 def linkrev(self):
735 735 return self._filelog.linkrev(self._filerev)
736 736 def node(self):
737 737 return self._changectx.node()
738 738 def hex(self):
739 739 return self._changectx.hex()
740 740 def user(self):
741 741 return self._changectx.user()
742 742 def date(self):
743 743 return self._changectx.date()
744 744 def files(self):
745 745 return self._changectx.files()
746 746 def description(self):
747 747 return self._changectx.description()
748 748 def branch(self):
749 749 return self._changectx.branch()
750 750 def extra(self):
751 751 return self._changectx.extra()
752 752 def phase(self):
753 753 return self._changectx.phase()
754 754 def phasestr(self):
755 755 return self._changectx.phasestr()
756 756 def obsolete(self):
757 757 return self._changectx.obsolete()
758 758 def instabilities(self):
759 759 return self._changectx.instabilities()
760 760 def manifest(self):
761 761 return self._changectx.manifest()
762 762 def changectx(self):
763 763 return self._changectx
764 764 def renamed(self):
765 765 return self._copied
766 766 def repo(self):
767 767 return self._repo
768 768 def size(self):
769 769 return len(self.data())
770 770
771 771 def path(self):
772 772 return self._path
773 773
774 774 def isbinary(self):
775 775 try:
776 776 return stringutil.binary(self.data())
777 777 except IOError:
778 778 return False
779 779 def isexec(self):
780 780 return 'x' in self.flags()
781 781 def islink(self):
782 782 return 'l' in self.flags()
783 783
784 784 def isabsent(self):
785 785 """whether this filectx represents a file not in self._changectx
786 786
787 787 This is mainly for merge code to detect change/delete conflicts. This is
788 788 expected to be True for all subclasses of basectx."""
789 789 return False
790 790
    # subclasses with their own comparison semantics (e.g. in-memory
    # contexts) set this so cmp() delegates to them
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            # let the other side drive the comparison
            return fctx.cmp(self)

        # Only read and compare contents when the sizes make a match
        # possible; otherwise report "different" without touching data.
        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True
809 809
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        # use the unfiltered changelog: the linkrev may point at a hidden rev
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result in a crash somewhere else at some point.
        return lkr
855 855
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        # with no associated changeset, the raw linkrev is all we have
        noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        # otherwise, adjust the linkrev to an ancestor of our changeset
        return self._adjustlinkrev(self.rev(), inclusive=True)
871 871
    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            # already anchored at the introducing changeset
            return self
        return self.filectx(self.filenode(), changeid=introrev)
879 879
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
895 895
    def parents(self):
        """return parent filectxs, substituting rename source when present"""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        # drop null parents up front; renames are handled below
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            #   be replaced with the rename information. This parent is -always-
            #   the first one.
            #
            # As null ids have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
915 915
916 916 def p1(self):
917 917 return self.parents()[0]
918 918
919 919 def p2(self):
920 920 p = self.parents()
921 921 if len(p) == 2:
922 922 return p[1]
923 923 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
924 924
    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)

        follow: also follow copies/renames when walking parents
        skiprevs: revisions whose changes should be attributed to their
                  parents instead
        diffopts: diff options controlling line comparison
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)
970 970
    def ancestors(self, followfirst=False):
        """Generate ancestor filectxs of this file revision.

        When followfirst is true, only the first parent of each visited
        file revision is followed.
        """
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            # pop the candidate with the greatest (linkrev, filenode) key
            # so ancestors are yielded in descending linkrev order
            c = visit.pop(max(visit))
            yield c
986 986
987 987 def decodeddata(self):
988 988 """Returns `data()` after running repository decoding filters.
989 989
990 990 This is often equivalent to how the data would be expressed on disk.
991 991 """
992 992 return self._repo.wwritedata(self.path(), self.data())
993 993
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node.

        At least one of changeid, fileid, or changectx must be given so
        the revision can be resolved; the rest is derived lazily.
        """
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        """Return revision data without flag processing applied."""
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, honoring the censor policy for
        censored nodes."""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        """Return the size of this file revision as reported by the
        filelog."""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1099 1099
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # Unset attributes fall back to the propertycaches below
        # (_date, _user, _status), which consult the repo lazily.
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        """Return the sorted list of files changed in this context."""
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        """Return the union of the parents' bookmarks."""
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        """Return the highest phase among the parents (at least draft)."""
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """Return the flags ('l', 'x' or '') for path, or '' if unknown."""
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        """Yield the parents, then all of their changelog ancestors."""
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1300 1300
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
       or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        """Iterate over tracked files (everything not marked removed)."""
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        # tracked unless unknown ('?') or removed ('r')
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return hex(wdirid)

    @propertycache
    def _parents(self):
        # drop the second parent when it is null (the common case)
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        """Schedule the given files for addition, returning rejected ones."""
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension.  That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

    def forget(self, files, prefix=""):
        """Stop tracking the given files, returning those not tracked."""
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected

    def undelete(self, list):
        """Restore removed files from a parent context."""
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)

    def copy(self, source, dest):
        """Mark dest as a copy of source in the dirstate."""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """Build a matcher for working directory files."""
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if (d == '' or len(d) >= 1024 or '\n' in d
                    or stringutil.binary(d)):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        """Re-check possibly-clean files by content; returns
        (modified, deleted, fixup) where fixup lists files that are in
        fact clean and whose dirstate entry should be refreshed."""
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuse the file nodeid from parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

    def markcommitted(self, node):
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)
1649 1649
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # the copy source replaces the first parent; it has no filelog
            # yet, so pad the tuple with None
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # nullid entries (file absent in that parent) are dropped
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []
1696 1696
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        """Return the on-disk content, run through read filters."""
        return self._repo.wread(self._path)
    def renamed(self):
        """Return (source path, source filenode) if the dirstate records a
        copy for this file, else None."""
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        """Return (mtime, tzoffset), falling back to the changectx date
        if the file is gone."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        # only meaningful for normal/modified/added dirstate entries
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if wvfs.isdir(f) and not wvfs.islink(f):
            wvfs.rmtree(f, forcibly=True)
        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
            # remove any file or symlink standing in for a parent directory
            for p in reversed(list(util.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break

    def setflags(self, l, x):
        """Set the symlink (l) and executable (x) flags on disk."""
        self._repo.wvfs.setflags(self._path, l, x)
1776 1776
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
        'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        """set (or reset) the context this overlay caches writes on top of"""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')

    def data(self, path):
        """return the content of ``path``, preferring dirty cache entries"""
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        if self.isdirty(path):
            self._cache[path]['copied'] = origin
        else:
            raise error.ProgrammingError('markcopied() called on clean context')

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path]['copied']
        else:
            raise error.ProgrammingError('copydata() called on clean context')

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                # fixed: this used to interpolate ``self._path``, which does
                # not exist on this class and raised AttributeError instead
                # of the intended ProgrammingError
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %s." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%s." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in pycompat.xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self.p1():
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = matchmod.match('/', '', [path + '/'], default=b'relpath')
        matches = self.p1().manifest().matches(match)
        if len(matches) > 0:
            # list() so this also works on Python 3, where keys() is a view
            if len(matches) == 1 and list(matches.keys())[0] == path:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a folder in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(matches),
                                 ', '.join(matches.keys())))

    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        self._markdirty(path, exists=True, date=dateutil.makedate(),
                        flags=(l and 'l' or '') + (x and 'x' or ''))

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                        'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                # fixed: interpolate ``path`` -- ``self._path`` doesn't exist
                # on this class and raised AttributeError here
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped contexts' if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self._cache.keys()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                            underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': None,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2063 2063
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        """return True when this file's content differs from ``fctx``'s"""
        return not (self.data() == fctx.data())

    def changectx(self):
        """the ``overlayworkingctx`` this file belongs to"""
        return self._parent

    def data(self):
        """file content, served by the overlay (cache or wrapped context)"""
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        """return (source path, filenode in base manifest) or None"""
        copysource = self._parent.copydata(self._path)
        if not copysource:
            return None
        basemanifest = self._changectx._parents[0]._manifest
        return copysource, basemanifest.get(copysource, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # nothing to audit for a purely in-memory file
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # ``backgroundclose`` is meaningless in memory, so it is not forwarded
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        # ``ignoremissing`` is not forwarded; the overlay just marks deletion
        return self._parent.remove(self._path)

    def clearunknown(self):
        pass
2122 2122
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # Deliberately skip workingctx.__init__ in the MRO: the precomputed
        # ``changes`` status is passed straight to committablectx.__init__
        # so the set of committed files is fixed up-front.
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        # ``ignored`` and ``unknown`` are accepted for interface compatibility
        # but always reported empty here.
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2158 2158
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # EAFP: a hit needs a single lookup; a miss computes and stores
        try:
            return cache[path]
        except KeyError:
            value = func(repo, memctx, path)
            cache[path] = value
            return value

    return getfilectx
2174 2174
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # fctx.renamed() yields (source path, filenode); only the source
        # path is kept here (apparently we only track one parent)
        renamed = fctx.renamed()
        copied = renamed[0] if renamed else renamed
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied)

    return getfilectx
2193 2193
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        filedata, mode, copysource = patchstore.getfile(path)
        if filedata is None:
            # the patch deletes this file; memctx expects None for deletions
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, filedata,
                          islink=islink, isexec=isexec, copied=copysource)

    return getfilectx
2208 2208
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # missing parents default to the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            # hash the new content against its file parents to derive the
            # filenode this commit would produce
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # added files have no file parents
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1
        # a file not managed by either parent must have been added
        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            # self[f] is the filectxfn result: None/falsy means the file is
            # being removed, otherwise it's a modification
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2331 2331
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        # the copy record always carries nullid as the source filenode
        self._copied = (copied, nullid) if copied else None

    def data(self):
        """return the in-memory file content"""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        # flags are intentionally ignored; only the content is replaced
        self._data = data
2364 2364
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            # no explicit ctx given, so it trivially matches the original
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr_ in attrs:
                # only copy attributes the original has already computed
                if util.safehasattr(originalfctx, attr_):
                    setattr(self, attr_, getattr(originalfctx, attr_))

    def data(self):
        """return file content (possibly lazily computed by datafunc)"""
        return self._datafunc()
2435 2435
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        # copy before padding so the caller's list isn't mutated
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        # NOTE(review): p1/p2 are changectx objects while nullid is a raw
        # node -- confirm the `!= nullid` comparisons short-circuit as
        # intended for null parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        """node id of the reused manifest"""
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2530 2530
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """return True if this file's content differs from ``fctx``'s"""
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # arbitrary on-disk files carry no link/exec flags here
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        # open in binary mode: ``data`` is raw file content (bytes) and must
        # not be subject to platform newline translation (the old text-mode
        # "w" would corrupt binary data on Windows)
        with open(self._path, "wb") as f:
            f.write(data)
@@ -1,1489 +1,1498 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import re
15 15 import socket
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 hex,
22 22 nullid,
23 23 short,
24 24 wdirid,
25 25 wdirrev,
26 26 )
27 27
28 28 from . import (
29 29 encoding,
30 30 error,
31 31 match as matchmod,
32 32 obsolete,
33 33 obsutil,
34 34 pathutil,
35 35 phases,
36 36 pycompat,
37 37 revsetlang,
38 38 similar,
39 39 url,
40 40 util,
41 41 vfs,
42 42 )
43 43
44 44 from .utils import (
45 45 procutil,
46 46 stringutil,
47 47 )
48 48
49 49 if pycompat.iswindows:
50 50 from . import scmwindows as scmplatform
51 51 else:
52 52 from . import scmposix as scmplatform
53 53
54 54 termsize = scmplatform.termsize
55 55
class status(tuple):
    '''Immutable 7-tuple of file lists, one list per dirstate status.

    The 'deleted', 'unknown' and 'ignored' properties are only relevant to
    the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        buckets = (modified, added, removed, deleted, unknown, ignored,
                   clean)
        return tuple.__new__(cls, buckets)

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files tracked in the dirstate but gone from the working copy
        (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        template = ('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                    'unknown=%r, ignored=%r, clean=%r>')
        return template % self
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2

    Yields (subpath, subrepo) pairs, sorted by subpath.  Subrepos present
    only in ctx2 are yielded last as empty "null" subrepos based on ctx1.
    """
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    # Python 2 dict API (iteritems); sorting keeps output deterministic.
    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)
133 133
def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    # Count excluded nodes that are secret (and not already extinct); they
    # explain why "no changes" might surprise the user.
    secretlist = []
    if excluded:
        for node in excluded:
            c = repo[node]
            if c.phase() >= phases.secret and not c.extinct():
                secretlist.append(node)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))
150 150
def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except: # re-raises
            # Record the traceback (shown with --traceback) before the
            # outer handlers consume the exception.
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %r') % inst.locker
        else:
            reason = _('lock held by %r') % inst.locker
        ui.warn(_("abort: %s: %s\n")
                % (inst.desc or stringutil.forcebytestr(inst.filename), reason))
        if not inst.locker:
            ui.warn(_("(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.warn(_("abort: could not lock %s: %s\n") %
                (inst.desc or stringutil.forcebytestr(inst.filename),
                 encoding.strtolocal(inst.strerror)))
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _("abort: remote error:\n")
        else:
            msg = _("abort: remote error\n")
        ui.warn(msg)
        if inst.args:
            ui.warn(''.join(inst.args))
        if inst.hint:
            ui.warn('(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.warn(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.warn(_("abort: %s") % inst.args[0])
        msg = inst.args[1]
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.warn(" %r\n" % (msg,))
        elif not msg:
            ui.warn(_(" empty string\n"))
        else:
            ui.warn("\n%r\n" % stringutil.ellipsis(msg))
    except error.CensoredNodeError as inst:
        ui.warn(_("abort: file censored %s!\n") % inst)
    except error.RevlogError as inst:
        ui.warn(_("abort: %s!\n") % inst)
    except error.InterventionRequired as inst:
        ui.warn("%s\n" % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
        # InterventionRequired is the one handler with its own exit code.
        return 1
    except error.WdirUnsupported:
        ui.warn(_("abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.warn(_("abort: %s\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.warn(_("abort: %s!\n") % stringutil.forcebytestr(inst))
        # The last word of the message is usually the missing module name.
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.warn(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.warn(_("(is your Python install correct?)\n"))
    except IOError as inst:
        if util.safehasattr(inst, "code"):
            # urllib HTTPError carries a "code" attribute
            ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst))
        elif util.safehasattr(inst, "reason"):
            # urllib URLError / SSLError
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            # NOTE: `unicode` is the Python 2 builtin; this branch needs
            # attention when porting to Python 3.
            if isinstance(reason, unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.warn(_("abort: error: %s\n") % reason)
        elif (util.safehasattr(inst, "args")
              and inst.args and inst.args[0] == errno.EPIPE):
            # Broken pipe (e.g. output piped to `head`) is not an error.
            pass
        elif getattr(inst, "strerror", None):
            if getattr(inst, "filename", None):
                ui.warn(_("abort: %s: %s\n") % (
                    encoding.strtolocal(inst.strerror),
                    stringutil.forcebytestr(inst.filename)))
            else:
                ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:
            raise
    except OSError as inst:
        if getattr(inst, "filename", None) is not None:
            ui.warn(_("abort: %s: '%s'\n") % (
                encoding.strtolocal(inst.strerror),
                stringutil.forcebytestr(inst.filename)))
        else:
            ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
    except MemoryError:
        ui.warn(_("abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and pass exit code to caller.
        return inst.code
    except socket.error as inst:
        ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))

    return -1
267 267
def checknewlabel(repo, lbl, kind):
    """Abort if `lbl` is unusable as a new label (bookmark/branch/etc) name.

    Do not use the "kind" parameter in ui output; it makes strings
    difficult to translate.
    """
    if lbl in ('tip', '.', 'null'):
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for forbidden in (':', '\0', '\n', '\r'):
        if forbidden in lbl:
            raise error.Abort(
                _("%r cannot be used in a name") % pycompat.bytestr(forbidden))
    try:
        int(lbl)
    except ValueError:
        pass
    else:
        raise error.Abort(_("cannot use an integer as a name"))
    if lbl.strip() != lbl:
        raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
284 284
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if any(banned in f for banned in ('\r', '\n')):
        raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
289 289
def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if not (abort or warn):
        return
    msg = util.checkwinfilename(f)
    if not msg:
        return
    msg = "%s: %s" % (msg, procutil.shellquote(f))
    if abort:
        raise error.Abort(msg)
    ui.warn(_("warning: %s\n") % msg)
301 301
def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    setting = ui.config('ui', 'portablefilenames')
    lowered = setting.lower()
    asbool = stringutil.parsebool(setting)
    # Windows always aborts on non-portable names; elsewhere honor config.
    abort = pycompat.iswindows or lowered == 'abort'
    warn = bool(asbool) or lowered == 'warn'
    if asbool is None and not (warn or abort or lowered == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % setting)
    return abort, warn
314 314
class casecollisionauditor(object):
    """Warn or abort when a new filename case-folds to an existing one.

    Instances are callable: call with each filename about to be added.
    """
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        # abort=True raises on collision; False only warns.
        self._abort = abort
        # Pre-compute the lowercased set of all tracked files in one pass.
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        # Collision only if the folded name is taken by a *different* file.
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
338 338
def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.  Returns None when nothing at or below maxrev is
    filtered.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if not revs:
        return None
    s = hashlib.sha1()
    for rev in revs:
        s.update('%d;' % rev)
    return s.digest()
362 362
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        # Only propagate walk errors for the root itself; ignore errors
        # on subdirectories (e.g. permission denied).
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        # adddir returns True if dirname's stat was not seen before, and
        # records it; used to break symlink cycles.
        def adddir(dirlst, dirname):
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        # Without samestat we cannot detect cycles, so disable symlinks.
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        # Recurse through the symlink with shared seen_dirs
                        # so cycles are broken across recursion levels.
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs
406 406
def binnode(ctx):
    """Return binary node id for a given basectx"""
    # The working directory has no real node; substitute the fake wdir id.
    node = ctx.node()
    return wdirid if node is None else node
413 413
def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    # The working directory has rev None; map it to the large wdirrev int.
    rev = ctx.rev()
    return wdirrev if rev is None else rev
421 421
def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    return formatrevnode(ctx.repo().ui, intrev(ctx), binnode(ctx))
427 427
def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    # Full 40-char hash in debug mode, 12-char short hash otherwise.
    hexfunc = hex if ui.debugflag else short
    return '%d:%s' % (rev, hexfunc(node))
435 435
def resolvepartialhexnodeid(repo, prefix):
    """Resolve a hex nodeid prefix to a full binary nodeid.

    Returns None if the prefix matches no known node; raises from
    _partialmatch if the prefix is ambiguous.
    """
    # Uses unfiltered repo because it's faster when the prefix is ambiguous.
    # This matches the "shortest" template function.
    node = repo.unfiltered().changelog._partialmatch(prefix)
    if node is None:
        return
    repo.changelog.rev(node) # make sure node isn't filtered
    return node
444
def isrevsymbol(repo, symbol):
    """Report whether `symbol` resolves to a revision in `repo`."""
    try:
        revsymbol(repo, symbol)
    except error.RepoLookupError:
        return False
    return True
442 451
def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        raise error.ProgrammingError(
            "symbol (%s of type %s) was not a string, did you mean "
            "repo[symbol]?" % (symbol, type(symbol)))
    try:
        return repo[symbol]
    except (error.FilteredIndexError, error.FilteredLookupError,
            error.FilteredRepoLookupError):
        # Translate filtered-revision errors into a user-facing message.
        raise _filterederror(repo, symbol)
459 468
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith('visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _("hidden revision '%s'") % changeid

        hint = _('use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    # Non-"visible" filters (e.g. "served") get a generic message.
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)
484 493
def revsingle(repo, revspec, default='.', localalias=None):
    """Return the context for the single (last) revision matched by revspec.

    Falls back to `default` when revspec is empty; aborts when the revset
    matches nothing.
    """
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec], localalias=localalias)
    if not matched:
        raise error.Abort(_('empty revision set'))
    return repo[matched.last()]
493 502
def _pairspec(revspec):
    """Report whether revspec parses to a top-level range expression."""
    tree = revsetlang.parse(revspec)
    if not tree:
        return tree
    return tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
497 506
def revpairnodes(repo, revs):
    """Deprecated wrapper around revpair() returning nodes, not contexts."""
    repo.ui.deprecwarn("revpairnodes is deprecated, please use revpair", "4.6")
    first, second = revpair(repo, revs)
    return first.node(), second.node()
502 511
def revpair(repo, revs):
    """Resolve user-supplied revision specs to a (first, second) ctx pair.

    An empty spec list yields ('.', working directory).  A single
    non-range spec yields (rev, working directory).
    """
    if not revs:
        return repo['.'], repo[None]

    l = revrange(repo, revs)

    # Pick the endpoints cheaply when the smartset knows its ordering.
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    # first == second with multiple specs may mean one side was empty.
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]
532 541
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # Normalize bare revision numbers into 'rev(N)' revset expressions.
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
560 569
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    ps = ctx.parents()
    if len(ps) > 1:
        return ps
    if repo.ui.debugflag:
        # In debug mode always show both parents, padding with null.
        return [ps[0], repo['null']]
    if ps[0].rev() >= intrev(ctx) - 1:
        return []
    return ps
576 585
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it already has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    expanded = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # Explicit pattern kind (e.g. 're:') -- pass through untouched.
            expanded.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            expanded.extend(globbed)
        else:
            # Glob matched nothing; keep the literal pattern.
            expanded.append(kindpat)
    return expanded
595 604
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    # Note: closes over `m`, which is bound below before badfn can be called.
    def bad(f, msg):
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    # An always-matcher means the patterns were effectively unused.
    if m.always():
        pats = []
    return m, pats
620 629
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    matcher, _unused = matchandpats(ctx, pats, opts, globbed, default,
                                    badfn=badfn)
    return matcher
625 634
def matchall(repo):
    '''Return a matcher that will efficiently match everything.

    The matcher is anchored at the repo root and the current cwd.'''
    return matchmod.always(repo.root, repo.getcwd())
629 638
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.

    badfn, if given, is called for files that cannot be matched.'''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)
633 642
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        # Plain path: just canonicalize it.
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    # Pattern: it must match exactly one file in the given revision.
    ctx = repo[rev]
    m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    files = [f for f in ctx if m(f)]
    if len(files) != 1:
        raise error.ParseError(msg)
    return files[0]
647 656
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if not origbackuppath:
        return filepath + ".orig"

    # Convert filepath from an absolute path into a path inside the repo.
    filepathfromroot = util.normpath(os.path.relpath(filepath,
                                                     start=repo.root))

    origvfs = vfs.vfs(repo.wjoin(origbackuppath))
    origbackupdir = origvfs.dirname(filepathfromroot)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(util.finddirs(filepathfromroot))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    # A directory at the target path would shadow the backup file.
    if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepathfromroot))
        origvfs.rmtree(filepathfromroot, forcibly=True)

    return origvfs.join(filepathfromroot)
683 692
684 693 class _containsnode(object):
685 694 """proxy __contains__(node) to container.__contains__ which accepts revs"""
686 695
687 696 def __init__(self, repo, revcontainer):
688 697 self._torev = repo.changelog.rev
689 698 self._revcontains = revcontainer.__contains__
690 699
691 700 def __contains__(self, node):
692 701 return self._revcontains(self._torev(node))
693 702
def cleanupnodes(repo, replacements, operation, moves=None, metadata=None):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.
    """
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        replacements = {n: () for n in replacements}

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnode, newnodes in replacements.items():
        if oldnode in moves:
            continue
        if len(newnodes) > 1:
            # usually a split, take the one with biggest rev number
            newnode = next(unfi.set('max(%ln)', newnodes)).node()
        elif len(newnodes) == 0:
            # move bookmark backwards
            roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                  list(replacements)))
            if roots:
                newnode = roots[0].node()
            else:
                newnode = nullid
        else:
            newnode = newnodes[0]
        moves[oldnode] = newnode

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        allnewnodes = [n for ns in replacements.values() for n in ns]
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (util.rapply(pycompat.maybebytestr, oldbmarks),
                           hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the filtering and sorting might belong to createmarkers.
            isobs = unfi.obsstore.successors.__contains__
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0])
            rels = [(unfi[n], tuple(unfi[m] for m in s))
                    for n, s in sorted(replacements.items(), key=sortfunc)
                    if s or not isobs(n)]
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        else:
            from . import repair # avoid import cycle
            tostrip = list(replacements)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation)
787 796
def addremove(repo, matcher, prefix, opts=None):
    """Add new files and remove missing files, recursing into subrepos.

    Returns 1 if any file was rejected or a subrepo reported a problem,
    0 otherwise.  Honors opts 'dry_run', 'similarity' and 'subrepos'.
    """
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get('dry_run')
    try:
        similarity = float(opts.get('similarity') or 0)
    except ValueError:
        raise error.Abort(_('similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_('similarity must be between 0 and 100'))
    # Normalize percentage to the 0.0-1.0 fraction used internally.
    similarity /= 100.0

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                    badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    # A rejected file that was explicitly requested is a hard failure.
    for f in rejected:
        if f in m.files():
            return 1
    return ret
847 856
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # Note: the badfn lambda closes over `rejected`, which is bound on the
    # next line before the matcher can invoke it.
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # Return 1 if any explicitly named file could not be processed.
    for f in rejected:
        if f in m.files():
            return 1
    return 0
876 885
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns a tuple of lists of file names:
    (added, unknown, deleted, removed, forgotten).'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                unknown=True, ignored=False, full=False)
    # loop variable named 'fname' rather than 'abs' to avoid shadowing the
    # abs() builtin
    for fname, st in walkresults.iteritems():
        dstate = dirstate[fname]
        if dstate == '?' and audit_path.check(fname):
            unknown.append(fname)
        elif dstate != 'r' and not st:
            deleted.append(fname)
        elif dstate == 'r' and st:
            forgotten.append(fname)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(fname)
        elif dstate == 'a':
            added.append(fname)

    return added, unknown, deleted, removed, forgotten
905 914
def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity <= 0:
        return renames
    candidates = similar.findrenames(repo, added, removed, similarity)
    for old, new, score in candidates:
        bothexact = matcher.exact(old) and matcher.exact(new)
        if repo.ui.verbose or not bothexact:
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (matcher.rel(old), matcher.rel(new),
                            score * 100))
        renames[new] = old
    return renames
920 929
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    workingctx = repo[None]
    with repo.wlock():
        workingctx.forget(deleted)
        workingctx.add(unknown)
        for dst, src in renames.iteritems():
            workingctx.copy(src, dst)
930 939
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    dirstate = repo.dirstate
    origsrc = dirstate.copied(src) or src
    if dst == origsrc:
        # copying back a copy?
        if dirstate[dst] not in 'mn' and not dryrun:
            dirstate.normallookup(dst)
        return
    if dirstate[origsrc] == 'a' and origsrc == src:
        # the source is only added in the working copy, so the copy data
        # cannot survive a commit; at most (maybe) add the destination
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if dirstate[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
949 958
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for entry in requirements:
        if entry in supported:
            continue
        # a requirement must be a non-empty string starting with an
        # alphanumeric character, otherwise the file itself is broken
        if not entry or not entry[0:1].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
        missings.append(entry)
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(sorted(missings)),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
968 977
def writerequires(opener, requirements):
    # write one requirement per line, sorted for deterministic output
    lines = ["%s\n" % entry for entry in sorted(requirements)]
    with opener('requires', 'w') as fp:
        fp.write(''.join(lines))
973 982
class filecachesubentry(object):
    """Tracks stat information for a single file path.

    Used by filecacheentry/filecache to decide whether a value derived
    from the file is still valid.  A _cacheable of None means "unknown so
    far"; cacheable() optimistically assumes True until an actual stat
    result says otherwise.
    """
    def __init__(self, path, stat):
        # path: filesystem path to watch
        # stat: whether to record stat info for the path right away
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        """Re-stat the file and remember the new stat info, if cacheable."""
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        """Whether stat info can be used to detect changes to this file
        (delegates to util.cachestat.cacheable() once known)."""
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        """True if the file appears changed since the last recorded stat.

        Side effect: updates the recorded stat info when a change is seen.
        """
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        """Return util.cachestat for path, or None if the file is missing.

        Other OS errors are re-raised."""
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1028 1037
class filecacheentry(object):
    """A group of filecachesubentry objects, one per tracked path."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(sub.changed() for sub in self._entries)

    def refresh(self):
        for sub in self._entries:
            sub.refresh()
1045 1054
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        # paths: relative paths resolved per-instance via join()
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # decorator invocation: remember the wrapped function and its name
        # (as bytes, since it is used as a _filecache/__dict__ key)
        self.func = func
        self.name = func.__name__.encode('ascii')
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        # (a hit in obj.__dict__ means the value was computed or set and has
        # not been invalidated since)
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                # a tracked file changed on disk: recompute the value
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # invalidate the cached value; the next access will recompute it
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1124 1133
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                                    close_fds=procutil.closefds,
                                    stdout=subprocess.PIPE, cwd=repo.root)
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        # each record is '<revspec>[ <value>]'; a missing value means ""
        for l in src:
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        # always reap the subprocess and close the stream, even on error
        if proc:
            proc.communicate()
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, procutil.explainexit(proc.returncode)))

    return data
1179 1188
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    """Run cmd through repo.ui.system with lock's inheritance token
    exported in the environment under envvar."""
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    environ = {} if environ is None else environ
    with lock.inherit() as token:
        environ[envvar] = token
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1189 1198
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1198 1207
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    if ui.configbool('format', 'generaldelta'):
        return True
    return ui.configbool('format', 'usegeneraldelta')
1205 1214
def gddeltaconfig(ui):
    """Tell whether incoming deltas should be optimised for general delta."""
    # experimental config: format.generaldelta
    return ui.configbool('format', 'generaldelta')
1211 1220
class simplekeyvaluefile(object):
    """A simple file of key=value lines.

    Keys must be alphanumerics and start with a letter; values must not
    contain '\n' characters."""
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                raise error.CorruptedState(_("empty simplekeyvalue file"))
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # skip lines containing only whitespace; other lines are
            # 'key=value' with the trailing newline dropped
            parsed = dict(line[:-1].split('=', 1)
                          for line in lines if line.strip())
            if self.firstlinekey in parsed:
                e = _("%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(parsed)
        except ValueError as e:
            raise error.CorruptedState(str(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = [] if firstline is None else ['%s\n' % firstline]

        for k, v in data.items():
            if k == self.firstlinekey:
                raise error.ProgrammingError("key name '%s' is reserved"
                                             % self.firstlinekey)
            if not k[0:1].isalpha():
                raise error.ProgrammingError(
                    "keys must start with a letter in a key-value file")
            if not k.isalnum():
                raise error.ProgrammingError(
                    "invalid key name in a simple key-value file")
            if '\n' in v:
                raise error.ProgrammingError(
                    "invalid value in a simple key-value file")
            lines.append("%s=%s\n" % (k, v))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))
1280 1289
# transaction name prefixes (matched by registersummarycallback's txmatch)
# after which a count of obsoleted changesets is reported
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

# transaction name prefixes after which the range of new changesets is
# reported
_reportnewcssource = [
    'pull',
    'unbundle',
]

# a list of (repo, ctx, files) functions called by various commands to allow
# extensions to ensure the corresponding files are available locally, before the
# command uses them.
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
1301 1310
def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed

    Which summaries are registered depends on the transaction name prefix
    (see _reportobsoletedsource and _reportnewcssource) and on the repo's
    evolution configuration.
    """
    def txmatch(sources):
        # a transaction matches when its name starts with any listed source
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        # numbered category names keep the callbacks firing in
        # registration order
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            # report how many changesets the transaction obsoleted
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        # (user-facing label, revset name) pairs for instability reporting
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            # count unstable changesets of each type, ignoring filtered revs
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        # snapshot counts now so the callback can report only the delta
        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                if delta > 0:
                    repo.ui.warn(_('%i new %s changesets\n') %
                                 (delta, instability))

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            newrevs = tr.changes.get('revs', xrange(0, 0))
            if not newrevs:
                return

            # Compute the bounds of new revisions' range, excluding obsoletes.
            unfi = repo.unfiltered()
            revs = unfi.revs('%ld and not obsolete()', newrevs)
            if not revs:
                # Got only obsoletes.
                return
            minrev, maxrev = repo[revs.min()], repo[revs.max()]

            if minrev == maxrev:
                revrange = minrev
            else:
                revrange = '%s:%s' % (minrev, maxrev)
            repo.ui.status(_('new changesets %s\n') % revrange)
1385 1394
def nodesummaries(repo, nodes, maxnumnodes=4):
    """Return a short textual listing of nodes, truncated unless verbose."""
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(n) for n in nodes)
    shown = ' '.join(short(n) for n in nodes[:maxnumnodes])
    return _("%s and %d others") % (shown, len(nodes) - maxnumnodes)
1391 1400
def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to affected branch
    for name, heads in visible.branchmap().iteritems():
        if len(heads) <= 1:
            continue
        msg = _('rejecting multiple heads on branch "%s"') % name
        hint = _('%d heads: %s') % (len(heads), nodesummaries(repo, heads))
        raise error.Abort(msg, hint=hint)
1406 1415
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally loaded.
    """
    # the default implementation is the identity; extensions wrap this
    return sink
1412 1421
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:
            # will be reported by scmutil.revrange()
            continue
        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)
    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join(pycompat.bytestr(unfi[r]) for r in revs)
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use a new filtername to separate branch/tags caches until we
    # can disable these caches when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)
1455 1464
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and returns a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    pmatch = unficl._partialmatch
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        # first, try interpreting the symbol as a revision number; revnums
        # are only honored when directaccess.revnums is enabled
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    # hidden rev: present in the unfiltered changelog but
                    # not in the filtered one
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        # otherwise treat the symbol as a (possibly partial) node hash
        try:
            s = pmatch(s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs
General Comments 0
You need to be logged in to leave comments. Login now