changectx: extract explicit computechangesetfilesadded method from context...
marmoute - r42936:87c4cd89 default
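This revision replaces the inline computation in changectx.filesadded() (the removed lines in the hunk below) with a call to a new scmutil.computechangesetfilesadded() helper. The helper itself lives in scmutil and is not part of this hunk; reconstructed from the deleted loop, it presumably looks like the following sketch (the docstring is illustrative; the body mirrors the removed code):

def computechangesetfilesadded(ctx):
    """return the list of files added in a changeset

    Sketch reconstructed from the loop deleted from changectx.filesadded();
    not the verbatim scmutil code."""
    added = []
    for f in ctx.files():
        # a file is "added" if no parent manifest contains it
        if not any(f in p for p in ctx.parents()):
            added.append(f)
    return added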
@@ -1,2589 +1,2584 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 hex,
19 19 modifiednodeid,
20 20 nullid,
21 21 nullrev,
22 22 short,
23 23 wdirfilenodeids,
24 24 wdirhex,
25 25 )
26 26 from . import (
27 27 copies,
28 28 dagop,
29 29 encoding,
30 30 error,
31 31 fileset,
32 32 match as matchmod,
33 33 obsolete as obsmod,
34 34 patch,
35 35 pathutil,
36 36 phases,
37 37 pycompat,
38 38 repoview,
39 39 scmutil,
40 40 sparse,
41 41 subrepo,
42 42 subrepoutil,
43 43 util,
44 44 )
45 45 from .utils import (
46 46 dateutil,
47 47 stringutil,
48 48 )
49 49
50 50 propertycache = util.propertycache
51 51
52 52 class basectx(object):
53 53 """A basectx object represents the common logic for its children:
54 54 changectx: read-only context that is already present in the repo,
55 55 workingctx: a context that represents the working directory and can
56 56 be committed,
57 57 memctx: a context that represents changes in-memory and can also
58 58 be committed."""
59 59
60 60 def __init__(self, repo):
61 61 self._repo = repo
62 62
63 63 def __bytes__(self):
64 64 return short(self.node())
65 65
66 66 __str__ = encoding.strmethod(__bytes__)
67 67
68 68 def __repr__(self):
69 69 return r"<%s %s>" % (type(self).__name__, str(self))
70 70
71 71 def __eq__(self, other):
72 72 try:
73 73 return type(self) == type(other) and self._rev == other._rev
74 74 except AttributeError:
75 75 return False
76 76
77 77 def __ne__(self, other):
78 78 return not (self == other)
79 79
80 80 def __contains__(self, key):
81 81 return key in self._manifest
82 82
83 83 def __getitem__(self, key):
84 84 return self.filectx(key)
85 85
86 86 def __iter__(self):
87 87 return iter(self._manifest)
88 88
89 89 def _buildstatusmanifest(self, status):
90 90 """Builds a manifest that includes the given status results, if this is
91 91 a working copy context. For non-working copy contexts, it just returns
92 92 the normal manifest."""
93 93 return self.manifest()
94 94
95 95 def _matchstatus(self, other, match):
96 96 """This internal method provides a way for child objects to override the
97 97 match operator.
98 98 """
99 99 return match
100 100
101 101 def _buildstatus(self, other, s, match, listignored, listclean,
102 102 listunknown):
103 103 """build a status with respect to another context"""
104 104 # Load earliest manifest first for caching reasons. More specifically,
105 105 # if you have revisions 1000 and 1001, 1001 is probably stored as a
106 106 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
107 107 # 1000 and cache it so that when you read 1001, we just need to apply a
108 108 # delta to what's in the cache. So that's one full reconstruction + one
109 109 # delta application.
110 110 mf2 = None
111 111 if self.rev() is not None and self.rev() < other.rev():
112 112 mf2 = self._buildstatusmanifest(s)
113 113 mf1 = other._buildstatusmanifest(s)
114 114 if mf2 is None:
115 115 mf2 = self._buildstatusmanifest(s)
116 116
117 117 modified, added = [], []
118 118 removed = []
119 119 clean = []
120 120 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
121 121 deletedset = set(deleted)
122 122 d = mf1.diff(mf2, match=match, clean=listclean)
123 123 for fn, value in d.iteritems():
124 124 if fn in deletedset:
125 125 continue
126 126 if value is None:
127 127 clean.append(fn)
128 128 continue
129 129 (node1, flag1), (node2, flag2) = value
130 130 if node1 is None:
131 131 added.append(fn)
132 132 elif node2 is None:
133 133 removed.append(fn)
134 134 elif flag1 != flag2:
135 135 modified.append(fn)
136 136 elif node2 not in wdirfilenodeids:
137 137 # When comparing files between two commits, we save time by
138 138 # not comparing the file contents when the nodeids differ.
139 139 # Note that this means we incorrectly report a reverted change
140 140 # to a file as a modification.
141 141 modified.append(fn)
142 142 elif self[fn].cmp(other[fn]):
143 143 modified.append(fn)
144 144 else:
145 145 clean.append(fn)
146 146
147 147 if removed:
148 148 # need to filter files if they are already reported as removed
149 149 unknown = [fn for fn in unknown if fn not in mf1 and
150 150 (not match or match(fn))]
151 151 ignored = [fn for fn in ignored if fn not in mf1 and
152 152 (not match or match(fn))]
153 153 # if they're deleted, don't report them as removed
154 154 removed = [fn for fn in removed if fn not in deletedset]
155 155
156 156 return scmutil.status(modified, added, removed, deleted, unknown,
157 157 ignored, clean)
158 158
159 159 @propertycache
160 160 def substate(self):
161 161 return subrepoutil.state(self, self._repo.ui)
162 162
163 163 def subrev(self, subpath):
164 164 return self.substate[subpath][1]
165 165
166 166 def rev(self):
167 167 return self._rev
168 168 def node(self):
169 169 return self._node
170 170 def hex(self):
171 171 return hex(self.node())
172 172 def manifest(self):
173 173 return self._manifest
174 174 def manifestctx(self):
175 175 return self._manifestctx
176 176 def repo(self):
177 177 return self._repo
178 178 def phasestr(self):
179 179 return phases.phasenames[self.phase()]
180 180 def mutable(self):
181 181 return self.phase() > phases.public
182 182
183 183 def matchfileset(self, expr, badfn=None):
184 184 return fileset.match(self, expr, badfn=badfn)
185 185
186 186 def obsolete(self):
187 187 """True if the changeset is obsolete"""
188 188 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
189 189
190 190 def extinct(self):
191 191 """True if the changeset is extinct"""
192 192 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
193 193
194 194 def orphan(self):
195 195 """True if the changeset is not obsolete, but its ancestor is"""
196 196 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
197 197
198 198 def phasedivergent(self):
199 199 """True if the changeset tries to be a successor of a public changeset
200 200
201 201 Only non-public and non-obsolete changesets may be phase-divergent.
202 202 """
203 203 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
204 204
205 205 def contentdivergent(self):
206 206 """Is a successor of a changeset with multiple possible successor sets
207 207
208 208 Only non-public and non-obsolete changesets may be content-divergent.
209 209 """
210 210 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
211 211
212 212 def isunstable(self):
213 213 """True if the changeset is either orphan, phase-divergent or
214 214 content-divergent"""
215 215 return self.orphan() or self.phasedivergent() or self.contentdivergent()
216 216
217 217 def instabilities(self):
218 218 """return the list of instabilities affecting this changeset.
219 219
220 220 Instabilities are returned as strings. Possible values are:
221 221 - orphan,
222 222 - phase-divergent,
223 223 - content-divergent.
224 224 """
225 225 instabilities = []
226 226 if self.orphan():
227 227 instabilities.append('orphan')
228 228 if self.phasedivergent():
229 229 instabilities.append('phase-divergent')
230 230 if self.contentdivergent():
231 231 instabilities.append('content-divergent')
232 232 return instabilities
233 233
234 234 def parents(self):
235 235 """return contexts for each parent changeset"""
236 236 return self._parents
237 237
238 238 def p1(self):
239 239 return self._parents[0]
240 240
241 241 def p2(self):
242 242 parents = self._parents
243 243 if len(parents) == 2:
244 244 return parents[1]
245 245 return self._repo[nullrev]
246 246
247 247 def _fileinfo(self, path):
248 248 if r'_manifest' in self.__dict__:
249 249 try:
250 250 return self._manifest[path], self._manifest.flags(path)
251 251 except KeyError:
252 252 raise error.ManifestLookupError(self._node, path,
253 253 _('not found in manifest'))
254 254 if r'_manifestdelta' in self.__dict__ or path in self.files():
255 255 if path in self._manifestdelta:
256 256 return (self._manifestdelta[path],
257 257 self._manifestdelta.flags(path))
258 258 mfl = self._repo.manifestlog
259 259 try:
260 260 node, flag = mfl[self._changeset.manifest].find(path)
261 261 except KeyError:
262 262 raise error.ManifestLookupError(self._node, path,
263 263 _('not found in manifest'))
264 264
265 265 return node, flag
266 266
267 267 def filenode(self, path):
268 268 return self._fileinfo(path)[0]
269 269
270 270 def flags(self, path):
271 271 try:
272 272 return self._fileinfo(path)[1]
273 273 except error.LookupError:
274 274 return ''
275 275
276 276 @propertycache
277 277 def _copies(self):
278 278 return copies.computechangesetcopies(self)
279 279 def p1copies(self):
280 280 return self._copies[0]
281 281 def p2copies(self):
282 282 return self._copies[1]
283 283
284 284 def sub(self, path, allowcreate=True):
285 285 '''return a subrepo for the stored revision of path, never wdir()'''
286 286 return subrepo.subrepo(self, path, allowcreate=allowcreate)
287 287
288 288 def nullsub(self, path, pctx):
289 289 return subrepo.nullsubrepo(self, path, pctx)
290 290
291 291 def workingsub(self, path):
292 292 '''return a subrepo for the stored revision, or wdir if this is a wdir
293 293 context.
294 294 '''
295 295 return subrepo.subrepo(self, path, allowwdir=True)
296 296
297 297 def match(self, pats=None, include=None, exclude=None, default='glob',
298 298 listsubrepos=False, badfn=None):
299 299 r = self._repo
300 300 return matchmod.match(r.root, r.getcwd(), pats,
301 301 include, exclude, default,
302 302 auditor=r.nofsauditor, ctx=self,
303 303 listsubrepos=listsubrepos, badfn=badfn)
304 304
305 305 def diff(self, ctx2=None, match=None, changes=None, opts=None,
306 306 losedatafn=None, pathfn=None, copy=None,
307 307 copysourcematch=None, hunksfilterfn=None):
308 308 """Returns a diff generator for the given contexts and matcher"""
309 309 if ctx2 is None:
310 310 ctx2 = self.p1()
311 311 if ctx2 is not None:
312 312 ctx2 = self._repo[ctx2]
313 313 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
314 314 opts=opts, losedatafn=losedatafn, pathfn=pathfn,
315 315 copy=copy, copysourcematch=copysourcematch,
316 316 hunksfilterfn=hunksfilterfn)
317 317
318 318 def dirs(self):
319 319 return self._manifest.dirs()
320 320
321 321 def hasdir(self, dir):
322 322 return self._manifest.hasdir(dir)
323 323
324 324 def status(self, other=None, match=None, listignored=False,
325 325 listclean=False, listunknown=False, listsubrepos=False):
326 326 """return status of files between two nodes or node and working
327 327 directory.
328 328
329 329 If other is None, compare this node with working directory.
330 330
331 331 returns (modified, added, removed, deleted, unknown, ignored, clean)
332 332 """
333 333
334 334 ctx1 = self
335 335 ctx2 = self._repo[other]
336 336
337 337 # This next code block is, admittedly, fragile logic that tests for
338 338 # reversing the contexts and wouldn't need to exist if it weren't for
339 339 # the fast (and common) code path of comparing the working directory
340 340 # with its first parent.
341 341 #
342 342 # What we're aiming for here is the ability to call:
343 343 #
344 344 # workingctx.status(parentctx)
345 345 #
346 346 # If we always built the manifest for each context and compared those,
347 347 # then we'd be done. But the special case of the above call means we
348 348 # just copy the manifest of the parent.
349 349 reversed = False
350 350 if (not isinstance(ctx1, changectx)
351 351 and isinstance(ctx2, changectx)):
352 352 reversed = True
353 353 ctx1, ctx2 = ctx2, ctx1
354 354
355 355 match = self._repo.narrowmatch(match)
356 356 match = ctx2._matchstatus(ctx1, match)
357 357 r = scmutil.status([], [], [], [], [], [], [])
358 358 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
359 359 listunknown)
360 360
361 361 if reversed:
362 362 # Reverse added and removed. Clear deleted, unknown and ignored as
363 363 # these make no sense to reverse.
364 364 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
365 365 r.clean)
366 366
367 367 if listsubrepos:
368 368 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
369 369 try:
370 370 rev2 = ctx2.subrev(subpath)
371 371 except KeyError:
372 372 # A subrepo that existed in node1 was deleted between
373 373 # node1 and node2 (inclusive). Thus, ctx2's substate
374 369 # won't contain that subpath. The best we can do is ignore it.
375 375 rev2 = None
376 376 submatch = matchmod.subdirmatcher(subpath, match)
377 377 s = sub.status(rev2, match=submatch, ignored=listignored,
378 378 clean=listclean, unknown=listunknown,
379 379 listsubrepos=True)
380 380 for rfiles, sfiles in zip(r, s):
381 381 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
382 382
383 383 for l in r:
384 384 l.sort()
385 385
386 386 return r
387 387
388 388 class changectx(basectx):
389 389 """A changecontext object makes access to data related to a particular
390 390 changeset convenient. It represents a read-only context already present in
391 391 the repo."""
392 392 def __init__(self, repo, rev, node):
393 393 super(changectx, self).__init__(repo)
394 394 self._rev = rev
395 395 self._node = node
396 396
397 397 def __hash__(self):
398 398 try:
399 399 return hash(self._rev)
400 400 except AttributeError:
401 401 return id(self)
402 402
403 403 def __nonzero__(self):
404 404 return self._rev != nullrev
405 405
406 406 __bool__ = __nonzero__
407 407
408 408 @propertycache
409 409 def _changeset(self):
410 410 return self._repo.changelog.changelogrevision(self.rev())
411 411
412 412 @propertycache
413 413 def _manifest(self):
414 414 return self._manifestctx.read()
415 415
416 416 @property
417 417 def _manifestctx(self):
418 418 return self._repo.manifestlog[self._changeset.manifest]
419 419
420 420 @propertycache
421 421 def _manifestdelta(self):
422 422 return self._manifestctx.readdelta()
423 423
424 424 @propertycache
425 425 def _parents(self):
426 426 repo = self._repo
427 427 p1, p2 = repo.changelog.parentrevs(self._rev)
428 428 if p2 == nullrev:
429 429 return [repo[p1]]
430 430 return [repo[p1], repo[p2]]
431 431
432 432 def changeset(self):
433 433 c = self._changeset
434 434 return (
435 435 c.manifest,
436 436 c.user,
437 437 c.date,
438 438 c.files,
439 439 c.description,
440 440 c.extra,
441 441 )
442 442 def manifestnode(self):
443 443 return self._changeset.manifest
444 444
445 445 def user(self):
446 446 return self._changeset.user
447 447 def date(self):
448 448 return self._changeset.date
449 449 def files(self):
450 450 return self._changeset.files
451 451 def filesmodified(self):
452 452 modified = set(self.files())
453 453 modified.difference_update(self.filesadded())
454 454 modified.difference_update(self.filesremoved())
455 455 return sorted(modified)
456 456 def filesadded(self):
457 457 source = self._repo.ui.config('experimental', 'copies.read-from')
458 458 if (source == 'changeset-only' or
459 459 (source == 'compatibility' and
460 460 self._changeset.filesadded is not None)):
461 461 return self._changeset.filesadded or []
462
463 added = []
464 for f in self.files():
465 if not any(f in p for p in self.parents()):
466 added.append(f)
467 return added
462 return scmutil.computechangesetfilesadded(self)
468 463 def filesremoved(self):
469 464 source = self._repo.ui.config('experimental', 'copies.read-from')
470 465 if (source == 'changeset-only' or
471 466 (source == 'compatibility' and
472 467 self._changeset.filesremoved is not None)):
473 468 return self._changeset.filesremoved or []
474 469
475 470 removed = []
476 471 for f in self.files():
477 472 if f not in self:
478 473 removed.append(f)
479 474 return removed
480 475
481 476 @propertycache
482 477 def _copies(self):
483 478 source = self._repo.ui.config('experimental', 'copies.read-from')
484 479 p1copies = self._changeset.p1copies
485 480 p2copies = self._changeset.p2copies
486 481 # If config says to get copy metadata only from changeset, then return
487 482 # that, defaulting to {} if there was no copy metadata.
488 483 # In compatibility mode, we return copy data from the changeset if
489 484 # it was recorded there, and otherwise we fall back to getting it from
490 485 # the filelogs (below).
491 486 if (source == 'changeset-only' or
492 487 (source == 'compatibility' and p1copies is not None)):
493 488 return p1copies or {}, p2copies or {}
494 489
495 490 # Otherwise (config said to read only from filelog, or we are in
496 491 # compatibility mode and there is no data in the changeset), we get
497 492 # the copy metadata from the filelogs.
498 493 return super(changectx, self)._copies
499 494 def description(self):
500 495 return self._changeset.description
501 496 def branch(self):
502 497 return encoding.tolocal(self._changeset.extra.get("branch"))
503 498 def closesbranch(self):
504 499 return 'close' in self._changeset.extra
505 500 def extra(self):
506 501 """Return a dict of extra information."""
507 502 return self._changeset.extra
508 503 def tags(self):
509 504 """Return a list of byte tag names"""
510 505 return self._repo.nodetags(self._node)
511 506 def bookmarks(self):
512 507 """Return a list of byte bookmark names."""
513 508 return self._repo.nodebookmarks(self._node)
514 509 def phase(self):
515 510 return self._repo._phasecache.phase(self._repo, self._rev)
516 511 def hidden(self):
517 512 return self._rev in repoview.filterrevs(self._repo, 'visible')
518 513
519 514 def isinmemory(self):
520 515 return False
521 516
522 517 def children(self):
523 518 """return list of changectx contexts for each child changeset.
524 519
525 520 This returns only the immediate child changesets. Use descendants() to
526 521 recursively walk children.
527 522 """
528 523 c = self._repo.changelog.children(self._node)
529 524 return [self._repo[x] for x in c]
530 525
531 526 def ancestors(self):
532 527 for a in self._repo.changelog.ancestors([self._rev]):
533 528 yield self._repo[a]
534 529
535 530 def descendants(self):
536 531 """Recursively yield all children of the changeset.
537 532
538 533 For just the immediate children, use children()
539 534 """
540 535 for d in self._repo.changelog.descendants([self._rev]):
541 536 yield self._repo[d]
542 537
543 538 def filectx(self, path, fileid=None, filelog=None):
544 539 """get a file context from this changeset"""
545 540 if fileid is None:
546 541 fileid = self.filenode(path)
547 542 return filectx(self._repo, path, fileid=fileid,
548 543 changectx=self, filelog=filelog)
549 544
550 545 def ancestor(self, c2, warn=False):
551 546 """return the "best" ancestor context of self and c2
552 547
553 548 If there are multiple candidates, it will show a message and check
554 549 merge.preferancestor configuration before falling back to the
555 550 revlog ancestor."""
556 551 # deal with workingctxs
557 552 n2 = c2._node
558 553 if n2 is None:
559 554 n2 = c2._parents[0]._node
560 555 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
561 556 if not cahs:
562 557 anc = nullid
563 558 elif len(cahs) == 1:
564 559 anc = cahs[0]
565 560 else:
566 561 # experimental config: merge.preferancestor
567 562 for r in self._repo.ui.configlist('merge', 'preferancestor'):
568 563 try:
569 564 ctx = scmutil.revsymbol(self._repo, r)
570 565 except error.RepoLookupError:
571 566 continue
572 567 anc = ctx.node()
573 568 if anc in cahs:
574 569 break
575 570 else:
576 571 anc = self._repo.changelog.ancestor(self._node, n2)
577 572 if warn:
578 573 self._repo.ui.status(
579 574 (_("note: using %s as ancestor of %s and %s\n") %
580 575 (short(anc), short(self._node), short(n2))) +
581 576 ''.join(_(" alternatively, use --config "
582 577 "merge.preferancestor=%s\n") %
583 578 short(n) for n in sorted(cahs) if n != anc))
584 579 return self._repo[anc]
585 580
586 581 def isancestorof(self, other):
587 582 """True if this changeset is an ancestor of other"""
588 583 return self._repo.changelog.isancestorrev(self._rev, other._rev)
589 584
590 585 def walk(self, match):
591 586 '''Generates matching file names.'''
592 587
593 588 # Wrap match.bad method to have message with nodeid
594 589 def bad(fn, msg):
595 590 # The manifest doesn't know about subrepos, so don't complain about
596 591 # paths into valid subrepos.
597 592 if any(fn == s or fn.startswith(s + '/')
598 593 for s in self.substate):
599 594 return
600 595 match.bad(fn, _('no such file in rev %s') % self)
601 596
602 597 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
603 598 return self._manifest.walk(m)
604 599
605 600 def matches(self, match):
606 601 return self.walk(match)
607 602
608 603 class basefilectx(object):
609 604 """A filecontext object represents the common logic for its children:
610 605 filectx: read-only access to a filerevision that is already present
611 606 in the repo,
612 607 workingfilectx: a filecontext that represents files from the working
613 608 directory,
614 609 memfilectx: a filecontext that represents files in-memory,
615 610 """
616 611 @propertycache
617 612 def _filelog(self):
618 613 return self._repo.file(self._path)
619 614
620 615 @propertycache
621 616 def _changeid(self):
622 617 if r'_changectx' in self.__dict__:
623 618 return self._changectx.rev()
624 619 elif r'_descendantrev' in self.__dict__:
625 620 # this file context was created from a revision with a known
626 621 # descendant, we can (lazily) correct for linkrev aliases
627 622 return self._adjustlinkrev(self._descendantrev)
628 623 else:
629 624 return self._filelog.linkrev(self._filerev)
630 625
631 626 @propertycache
632 627 def _filenode(self):
633 628 if r'_fileid' in self.__dict__:
634 629 return self._filelog.lookup(self._fileid)
635 630 else:
636 631 return self._changectx.filenode(self._path)
637 632
638 633 @propertycache
639 634 def _filerev(self):
640 635 return self._filelog.rev(self._filenode)
641 636
642 637 @propertycache
643 638 def _repopath(self):
644 639 return self._path
645 640
646 641 def __nonzero__(self):
647 642 try:
648 643 self._filenode
649 644 return True
650 645 except error.LookupError:
651 646 # file is missing
652 647 return False
653 648
654 649 __bool__ = __nonzero__
655 650
656 651 def __bytes__(self):
657 652 try:
658 653 return "%s@%s" % (self.path(), self._changectx)
659 654 except error.LookupError:
660 655 return "%s@???" % self.path()
661 656
662 657 __str__ = encoding.strmethod(__bytes__)
663 658
664 659 def __repr__(self):
665 660 return r"<%s %s>" % (type(self).__name__, str(self))
666 661
667 662 def __hash__(self):
668 663 try:
669 664 return hash((self._path, self._filenode))
670 665 except AttributeError:
671 666 return id(self)
672 667
673 668 def __eq__(self, other):
674 669 try:
675 670 return (type(self) == type(other) and self._path == other._path
676 671 and self._filenode == other._filenode)
677 672 except AttributeError:
678 673 return False
679 674
680 675 def __ne__(self, other):
681 676 return not (self == other)
682 677
683 678 def filerev(self):
684 679 return self._filerev
685 680 def filenode(self):
686 681 return self._filenode
687 682 @propertycache
688 683 def _flags(self):
689 684 return self._changectx.flags(self._path)
690 685 def flags(self):
691 686 return self._flags
692 687 def filelog(self):
693 688 return self._filelog
694 689 def rev(self):
695 690 return self._changeid
696 691 def linkrev(self):
697 692 return self._filelog.linkrev(self._filerev)
698 693 def node(self):
699 694 return self._changectx.node()
700 695 def hex(self):
701 696 return self._changectx.hex()
702 697 def user(self):
703 698 return self._changectx.user()
704 699 def date(self):
705 700 return self._changectx.date()
706 701 def files(self):
707 702 return self._changectx.files()
708 703 def description(self):
709 704 return self._changectx.description()
710 705 def branch(self):
711 706 return self._changectx.branch()
712 707 def extra(self):
713 708 return self._changectx.extra()
714 709 def phase(self):
715 710 return self._changectx.phase()
716 711 def phasestr(self):
717 712 return self._changectx.phasestr()
718 713 def obsolete(self):
719 714 return self._changectx.obsolete()
720 715 def instabilities(self):
721 716 return self._changectx.instabilities()
722 717 def manifest(self):
723 718 return self._changectx.manifest()
724 719 def changectx(self):
725 720 return self._changectx
726 721 def renamed(self):
727 722 return self._copied
728 723 def copysource(self):
729 724 return self._copied and self._copied[0]
730 725 def repo(self):
731 726 return self._repo
732 727 def size(self):
733 728 return len(self.data())
734 729
735 730 def path(self):
736 731 return self._path
737 732
738 733 def isbinary(self):
739 734 try:
740 735 return stringutil.binary(self.data())
741 736 except IOError:
742 737 return False
743 738 def isexec(self):
744 739 return 'x' in self.flags()
745 740 def islink(self):
746 741 return 'l' in self.flags()
747 742
748 743 def isabsent(self):
749 744 """whether this filectx represents a file not in self._changectx
750 745
751 746 This is mainly for merge code to detect change/delete conflicts. This is
752 747 expected to be True for all subclasses of basectx."""
753 748 return False
754 749
755 750 _customcmp = False
756 751 def cmp(self, fctx):
757 752 """compare with other file context
758 753
759 754 returns True if different than fctx.
760 755 """
761 756 if fctx._customcmp:
762 757 return fctx.cmp(self)
763 758
764 759 if self._filenode is None:
765 760 raise error.ProgrammingError(
766 761 'filectx.cmp() must be reimplemented if not backed by revlog')
767 762
768 763 if fctx._filenode is None:
769 764 if self._repo._encodefilterpats:
770 765 # can't rely on size() because wdir content may be decoded
771 766 return self._filelog.cmp(self._filenode, fctx.data())
772 767 if self.size() - 4 == fctx.size():
773 768 # size() can match:
774 769 # if file data starts with '\1\n', empty metadata block is
775 770 # prepended, which adds 4 bytes to filelog.size().
776 771 return self._filelog.cmp(self._filenode, fctx.data())
777 772 if self.size() == fctx.size():
778 773 # size() matches: need to compare content
779 774 return self._filelog.cmp(self._filenode, fctx.data())
780 775
781 776 # size() differs
782 777 return True
783 778
784 779 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
785 780 """return the first ancestor of <srcrev> introducing <fnode>
786 781
787 782 If the linkrev of the file revision does not point to an ancestor of
788 783 srcrev, we'll walk down the ancestors until we find one introducing
789 784 this file revision.
790 785
791 786 :srcrev: the changeset revision we search ancestors from
792 787 :inclusive: if true, the src revision will also be checked
793 788 :stoprev: an optional revision to stop the walk at. If no introduction
794 789 of this file content could be found before this floor
795 790 revision, the function will return "None" and stop its
796 791 iteration.
797 792 """
798 793 repo = self._repo
799 794 cl = repo.unfiltered().changelog
800 795 mfl = repo.manifestlog
801 796 # fetch the linkrev
802 797 lkr = self.linkrev()
803 798 if srcrev == lkr:
804 799 return lkr
805 800 # hack to reuse ancestor computation when searching for renames
806 801 memberanc = getattr(self, '_ancestrycontext', None)
807 802 iteranc = None
808 803 if srcrev is None:
809 804 # wctx case, used by workingfilectx during mergecopy
810 805 revs = [p.rev() for p in self._repo[None].parents()]
811 806 inclusive = True # we skipped the real (revless) source
812 807 else:
813 808 revs = [srcrev]
814 809 if memberanc is None:
815 810 memberanc = iteranc = cl.ancestors(revs, lkr,
816 811 inclusive=inclusive)
817 812 # check if this linkrev is an ancestor of srcrev
818 813 if lkr not in memberanc:
819 814 if iteranc is None:
820 815 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
821 816 fnode = self._filenode
822 817 path = self._path
823 818 for a in iteranc:
824 819 if stoprev is not None and a < stoprev:
825 820 return None
826 821 ac = cl.read(a) # get changeset data (we avoid object creation)
827 822 if path in ac[3]: # checking the 'files' field.
828 823 # The file has been touched, check if the content is
829 824 # similar to the one we search for.
830 825 if fnode == mfl[ac[0]].readfast().get(path):
831 826 return a
832 827 # In theory, we should never get out of that loop without a result.
833 828 # But if manifest uses a buggy file revision (not children of the
834 829 # one it replaces) we could. Such a buggy situation will likely
835 830 # result in a crash somewhere else at some point.
836 831 return lkr
837 832
838 833 def isintroducedafter(self, changelogrev):
839 834 """True if a filectx has been introduced after a given floor revision
840 835 """
841 836 if self.linkrev() >= changelogrev:
842 837 return True
843 838 introrev = self._introrev(stoprev=changelogrev)
844 839 if introrev is None:
845 840 return False
846 841 return introrev >= changelogrev
847 842
848 843 def introrev(self):
849 844 """return the rev of the changeset which introduced this file revision
850 845
851 846 This method is different from linkrev because it takes into account the
852 847 changeset the filectx was created from. It ensures the returned
853 848 revision is one of its ancestors. This prevents bugs from
854 849 'linkrev-shadowing' when a file revision is used by multiple
855 850 changesets.
856 851 """
857 852 return self._introrev()
858 853
859 854 def _introrev(self, stoprev=None):
860 855 """
861 856 Same as `introrev`, but with an extra argument to limit changelog
862 857 iteration range in some internal usecase.
863 858
864 859 If `stoprev` is set, the `introrev` will not be searched past that
865 860 `stoprev` revision and "None" might be returned. This is useful to
866 861 limit the iteration range.
867 862 """
868 863 toprev = None
869 864 attrs = vars(self)
870 865 if r'_changeid' in attrs:
871 866 # We have a cached value already
872 867 toprev = self._changeid
873 868 elif r'_changectx' in attrs:
874 869 # We know which changelog entry we are coming from
875 870 toprev = self._changectx.rev()
876 871
877 872 if toprev is not None:
878 873 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
879 874 elif r'_descendantrev' in attrs:
880 875 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
881 876 # be nice and cache the result of the computation
882 877 if introrev is not None:
883 878 self._changeid = introrev
884 879 return introrev
885 880 else:
886 881 return self.linkrev()
887 882
888 883 def introfilectx(self):
889 884 """Return filectx having identical contents, but pointing to the
890 885 changeset revision where this filectx was introduced"""
891 886 introrev = self.introrev()
892 887 if self.rev() == introrev:
893 888 return self
894 889 return self.filectx(self.filenode(), changeid=introrev)
895 890
896 891 def _parentfilectx(self, path, fileid, filelog):
897 892 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
898 893 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
899 894 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
900 895 # If self is associated with a changeset (probably explicitly
901 896 # fed), ensure the created filectx is associated with a
902 897 # changeset that is an ancestor of self.changectx.
903 898 # This lets us later use _adjustlinkrev to get a correct link.
904 899 fctx._descendantrev = self.rev()
905 900 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
906 901 elif r'_descendantrev' in vars(self):
907 902 # Otherwise propagate _descendantrev if we have one associated.
908 903 fctx._descendantrev = self._descendantrev
909 904 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
910 905 return fctx
911 906
912 907 def parents(self):
913 908 _path = self._path
914 909 fl = self._filelog
915 910 parents = self._filelog.parents(self._filenode)
916 911 pl = [(_path, node, fl) for node in parents if node != nullid]
917 912
918 913 r = fl.renamed(self._filenode)
919 914 if r:
920 915 # - In the simple rename case, both parents are nullid, pl is empty.
921 916 # - In case of merge, only one of the parent is null id and should
922 917 # be replaced with the rename information. This parent is -always-
923 918 # the first one.
924 919 #
925 920 # As nullid parents have always been filtered out in the previous list
926 921 # comprehension, inserting at 0 will always result in replacing the
927 922 # first nullid parent with the rename information.
928 923 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
929 924
930 925 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
931 926
932 927 def p1(self):
933 928 return self.parents()[0]
934 929
935 930 def p2(self):
936 931 p = self.parents()
937 932 if len(p) == 2:
938 933 return p[1]
939 934 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
940 935
941 936 def annotate(self, follow=False, skiprevs=None, diffopts=None):
942 937 """Returns a list of annotateline objects for each line in the file
943 938
944 939 - line.fctx is the filectx of the node where that line was last changed
945 940 - line.lineno is the line number at the first appearance in the managed
946 941 file
947 942 - line.text is the data on that line (including newline character)
948 943 """
949 944 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
950 945
951 946 def parents(f):
952 947 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
953 948 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
954 949 # from the topmost introrev (= srcrev) down to p.linkrev() if it
955 950 # isn't an ancestor of the srcrev.
956 951 f._changeid
957 952 pl = f.parents()
958 953
959 954 # Don't return renamed parents if we aren't following.
960 955 if not follow:
961 956 pl = [p for p in pl if p.path() == f.path()]
962 957
963 958 # renamed filectx won't have a filelog yet, so set it
964 959 # from the cache to save time
965 960 for p in pl:
966 961 if not r'_filelog' in p.__dict__:
967 962 p._filelog = getlog(p.path())
968 963
969 964 return pl
970 965
971 966 # use linkrev to find the first changeset where self appeared
972 967 base = self.introfilectx()
973 968 if getattr(base, '_ancestrycontext', None) is None:
974 969 cl = self._repo.changelog
975 970 if base.rev() is None:
976 971 # wctx is not inclusive, but works because _ancestrycontext
977 972 # is used to test filelog revisions
978 973 ac = cl.ancestors([p.rev() for p in base.parents()],
979 974 inclusive=True)
980 975 else:
981 976 ac = cl.ancestors([base.rev()], inclusive=True)
982 977 base._ancestrycontext = ac
983 978
984 979 return dagop.annotate(base, parents, skiprevs=skiprevs,
985 980 diffopts=diffopts)
986 981
987 982 def ancestors(self, followfirst=False):
988 983 visit = {}
989 984 c = self
990 985 if followfirst:
991 986 cut = 1
992 987 else:
993 988 cut = None
994 989
995 990 while True:
996 991 for parent in c.parents()[:cut]:
997 992 visit[(parent.linkrev(), parent.filenode())] = parent
998 993 if not visit:
999 994 break
1000 995 c = visit.pop(max(visit))
1001 996 yield c
1002 997
1003 998 def decodeddata(self):
1004 999 """Returns `data()` after running repository decoding filters.
1005 1000
1006 1001 This is often equivalent to how the data would be expressed on disk.
1007 1002 """
1008 1003 return self._repo.wwritedata(self.path(), self.data())
1009 1004
1010 1005 class filectx(basefilectx):
1011 1006 """A filecontext object makes access to data related to a particular
1012 1007 filerevision convenient."""
1013 1008 def __init__(self, repo, path, changeid=None, fileid=None,
1014 1009 filelog=None, changectx=None):
1015 1010 """changeid must be a revision number, if specified.
1016 1011 fileid can be a file revision or node."""
1017 1012 self._repo = repo
1018 1013 self._path = path
1019 1014
1020 1015 assert (changeid is not None
1021 1016 or fileid is not None
1022 1017 or changectx is not None), (
1023 1018 "bad args: changeid=%r, fileid=%r, changectx=%r"
1024 1019 % (changeid, fileid, changectx))
1025 1020
1026 1021 if filelog is not None:
1027 1022 self._filelog = filelog
1028 1023
1029 1024 if changeid is not None:
1030 1025 self._changeid = changeid
1031 1026 if changectx is not None:
1032 1027 self._changectx = changectx
1033 1028 if fileid is not None:
1034 1029 self._fileid = fileid
1035 1030
1036 1031 @propertycache
1037 1032 def _changectx(self):
1038 1033 try:
1039 1034 return self._repo[self._changeid]
1040 1035 except error.FilteredRepoLookupError:
1041 1036 # Linkrev may point to any revision in the repository. When the
1042 1037 # repository is filtered this may lead to `filectx` trying to build
1043 1038 # `changectx` for a filtered revision. In such a case we fall back to
1044 1039 # creating `changectx` on the unfiltered version of the repository.
1045 1040 # This fallback should not be an issue because `changectx` from
1046 1041 # `filectx` are not used in complex operations that care about
1047 1042 # filtering.
1048 1043 #
1049 1044 # This fallback is a cheap and dirty fix that prevents several
1050 1045 # crashes. It does not ensure the behavior is correct. However the
1051 1046 # behavior was not correct before filtering either and "incorrect
1052 1047 # behavior" is seen as better as "crash"
1053 1048 #
1054 1049 # Linkrevs have several serious troubles with filtering that are
1055 1050 # complicated to solve. Proper handling of the issue here should be
1056 1051 # considered when solutions to the linkrev issues are on the table.
1057 1052 return self._repo.unfiltered()[self._changeid]
1058 1053
1059 1054 def filectx(self, fileid, changeid=None):
1060 1055 '''opens an arbitrary revision of the file without
1061 1056 opening a new filelog'''
1062 1057 return filectx(self._repo, self._path, fileid=fileid,
1063 1058 filelog=self._filelog, changeid=changeid)
1064 1059
1065 1060 def rawdata(self):
1066 1061 return self._filelog.revision(self._filenode, raw=True)
1067 1062
1068 1063 def rawflags(self):
1069 1064 """low-level revlog flags"""
1070 1065 return self._filelog.flags(self._filerev)
1071 1066
1072 1067 def data(self):
1073 1068 try:
1074 1069 return self._filelog.read(self._filenode)
1075 1070 except error.CensoredNodeError:
1076 1071 if self._repo.ui.config("censor", "policy") == "ignore":
1077 1072 return ""
1078 1073 raise error.Abort(_("censored node: %s") % short(self._filenode),
1079 1074 hint=_("set censor.policy to ignore errors"))
1080 1075
1081 1076 def size(self):
1082 1077 return self._filelog.size(self._filerev)
1083 1078
1084 1079 @propertycache
1085 1080 def _copied(self):
1086 1081 """check if file was actually renamed in this changeset revision
1087 1082
1088 1083 If a rename is logged in the file revision, we report the copy for the
1089 1084 changeset only if the file revision's linkrev points back to the changeset
1090 1085 in question or if both changeset parents contain different file revisions.
1091 1086 """
1092 1087
1093 1088 renamed = self._filelog.renamed(self._filenode)
1094 1089 if not renamed:
1095 1090 return None
1096 1091
1097 1092 if self.rev() == self.linkrev():
1098 1093 return renamed
1099 1094
1100 1095 name = self.path()
1101 1096 fnode = self._filenode
1102 1097 for p in self._changectx.parents():
1103 1098 try:
1104 1099 if fnode == p.filenode(name):
1105 1100 return None
1106 1101 except error.LookupError:
1107 1102 pass
1108 1103 return renamed
1109 1104
1110 1105 def children(self):
1111 1106 # hard for renames
1112 1107 c = self._filelog.children(self._filenode)
1113 1108 return [filectx(self._repo, self._path, fileid=x,
1114 1109 filelog=self._filelog) for x in c]
1115 1110
1116 1111 class committablectx(basectx):
1117 1112 """A committablectx object provides common functionality for a context that
1118 1113 wants the ability to commit, e.g. workingctx or memctx."""
1119 1114 def __init__(self, repo, text="", user=None, date=None, extra=None,
1120 1115 changes=None, branch=None):
1121 1116 super(committablectx, self).__init__(repo)
1122 1117 self._rev = None
1123 1118 self._node = None
1124 1119 self._text = text
1125 1120 if date:
1126 1121 self._date = dateutil.parsedate(date)
1127 1122 if user:
1128 1123 self._user = user
1129 1124 if changes:
1130 1125 self._status = changes
1131 1126
1132 1127 self._extra = {}
1133 1128 if extra:
1134 1129 self._extra = extra.copy()
1135 1130 if branch is not None:
1136 1131 self._extra['branch'] = encoding.fromlocal(branch)
1137 1132 if not self._extra.get('branch'):
1138 1133 self._extra['branch'] = 'default'
1139 1134
1140 1135 def __bytes__(self):
1141 1136 return bytes(self._parents[0]) + "+"
1142 1137
1143 1138 __str__ = encoding.strmethod(__bytes__)
1144 1139
1145 1140 def __nonzero__(self):
1146 1141 return True
1147 1142
1148 1143 __bool__ = __nonzero__
1149 1144
1150 1145 @propertycache
1151 1146 def _status(self):
1152 1147 return self._repo.status()
1153 1148
1154 1149 @propertycache
1155 1150 def _user(self):
1156 1151 return self._repo.ui.username()
1157 1152
1158 1153 @propertycache
1159 1154 def _date(self):
1160 1155 ui = self._repo.ui
1161 1156 date = ui.configdate('devel', 'default-date')
1162 1157 if date is None:
1163 1158 date = dateutil.makedate()
1164 1159 return date
1165 1160
1166 1161 def subrev(self, subpath):
1167 1162 return None
1168 1163
1169 1164 def manifestnode(self):
1170 1165 return None
1171 1166 def user(self):
1172 1167 return self._user or self._repo.ui.username()
1173 1168 def date(self):
1174 1169 return self._date
1175 1170 def description(self):
1176 1171 return self._text
1177 1172 def files(self):
1178 1173 return sorted(self._status.modified + self._status.added +
1179 1174 self._status.removed)
1180 1175 def modified(self):
1181 1176 return self._status.modified
1182 1177 def added(self):
1183 1178 return self._status.added
1184 1179 def removed(self):
1185 1180 return self._status.removed
1186 1181 def deleted(self):
1187 1182 return self._status.deleted
1188 1183 filesmodified = modified
1189 1184 filesadded = added
1190 1185 filesremoved = removed
1191 1186
1192 1187 def branch(self):
1193 1188 return encoding.tolocal(self._extra['branch'])
1194 1189 def closesbranch(self):
1195 1190 return 'close' in self._extra
1196 1191 def extra(self):
1197 1192 return self._extra
1198 1193
1199 1194 def isinmemory(self):
1200 1195 return False
1201 1196
1202 1197 def tags(self):
1203 1198 return []
1204 1199
1205 1200 def bookmarks(self):
1206 1201 b = []
1207 1202 for p in self.parents():
1208 1203 b.extend(p.bookmarks())
1209 1204 return b
1210 1205
1211 1206 def phase(self):
1212 1207 phase = phases.draft # default phase to draft
1213 1208 for p in self.parents():
1214 1209 phase = max(phase, p.phase())
1215 1210 return phase
1216 1211
1217 1212 def hidden(self):
1218 1213 return False
1219 1214
1220 1215 def children(self):
1221 1216 return []
1222 1217
1223 1218 def ancestor(self, c2):
1224 1219 """return the "best" ancestor context of self and c2"""
1225 1220 return self._parents[0].ancestor(c2) # punt on two parents for now
1226 1221
1227 1222 def ancestors(self):
1228 1223 for p in self._parents:
1229 1224 yield p
1230 1225 for a in self._repo.changelog.ancestors(
1231 1226 [p.rev() for p in self._parents]):
1232 1227 yield self._repo[a]
1233 1228
1234 1229 def markcommitted(self, node):
1235 1230 """Perform post-commit cleanup necessary after committing this ctx
1236 1231
1237 1232 Specifically, this updates backing stores this working context
1238 1233 wraps to reflect the fact that the changes reflected by this
1239 1234 workingctx have been committed. For example, it marks
1240 1235 modified and added files as normal in the dirstate.
1241 1236
1242 1237 """
1243 1238
1244 1239 def dirty(self, missing=False, merge=True, branch=True):
1245 1240 return False
1246 1241
1247 1242 class workingctx(committablectx):
1248 1243 """A workingctx object makes access to data related to
1249 1244 the current working directory convenient.
1250 1245 date - any valid date string or (unixtime, offset), or None.
1251 1246 user - username string, or None.
1252 1247 extra - a dictionary of extra values, or None.
1253 1248 changes - a list of file lists as returned by localrepo.status()
1254 1249 or None to use the repository status.
1255 1250 """
1256 1251 def __init__(self, repo, text="", user=None, date=None, extra=None,
1257 1252 changes=None):
1258 1253 branch = None
1259 1254 if not extra or 'branch' not in extra:
1260 1255 try:
1261 1256 branch = repo.dirstate.branch()
1262 1257 except UnicodeDecodeError:
1263 1258 raise error.Abort(_('branch name not in UTF-8!'))
1264 1259 super(workingctx, self).__init__(repo, text, user, date, extra, changes,
1265 1260 branch=branch)
1266 1261
1267 1262 def __iter__(self):
1268 1263 d = self._repo.dirstate
1269 1264 for f in d:
1270 1265 if d[f] != 'r':
1271 1266 yield f
1272 1267
1273 1268 def __contains__(self, key):
1274 1269 return self._repo.dirstate[key] not in "?r"
1275 1270
1276 1271 def hex(self):
1277 1272 return wdirhex
1278 1273
1279 1274 @propertycache
1280 1275 def _parents(self):
1281 1276 p = self._repo.dirstate.parents()
1282 1277 if p[1] == nullid:
1283 1278 p = p[:-1]
1284 1279 # use unfiltered repo to delay/avoid loading obsmarkers
1285 1280 unfi = self._repo.unfiltered()
1286 1281 return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1287 1282
1288 1283 def _fileinfo(self, path):
1289 1284 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1290 1285 self._manifest
1291 1286 return super(workingctx, self)._fileinfo(path)
1292 1287
1293 1288 def _buildflagfunc(self):
1294 1289 # Create a fallback function for getting file flags when the
1295 1290 # filesystem doesn't support them
1296 1291
1297 1292 copiesget = self._repo.dirstate.copies().get
1298 1293 parents = self.parents()
1299 1294 if len(parents) < 2:
1300 1295 # when we have one parent, it's easy: copy from parent
1301 1296 man = parents[0].manifest()
1302 1297 def func(f):
1303 1298 f = copiesget(f, f)
1304 1299 return man.flags(f)
1305 1300 else:
1306 1301 # merges are tricky: we try to reconstruct the unstored
1307 1302 # result from the merge (issue1802)
1308 1303 p1, p2 = parents
1309 1304 pa = p1.ancestor(p2)
1310 1305 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1311 1306
1312 1307 def func(f):
1313 1308 f = copiesget(f, f) # may be wrong for merges with copies
1314 1309 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1315 1310 if fl1 == fl2:
1316 1311 return fl1
1317 1312 if fl1 == fla:
1318 1313 return fl2
1319 1314 if fl2 == fla:
1320 1315 return fl1
1321 1316 return '' # punt for conflicts
1322 1317
1323 1318 return func
1324 1319
1325 1320 @propertycache
1326 1321 def _flagfunc(self):
1327 1322 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1328 1323
1329 1324 def flags(self, path):
1330 1325 if r'_manifest' in self.__dict__:
1331 1326 try:
1332 1327 return self._manifest.flags(path)
1333 1328 except KeyError:
1334 1329 return ''
1335 1330
1336 1331 try:
1337 1332 return self._flagfunc(path)
1338 1333 except OSError:
1339 1334 return ''
1340 1335
1341 1336 def filectx(self, path, filelog=None):
1342 1337 """get a file context from the working directory"""
1343 1338 return workingfilectx(self._repo, path, workingctx=self,
1344 1339 filelog=filelog)
1345 1340
1346 1341 def dirty(self, missing=False, merge=True, branch=True):
1347 1342 "check whether a working directory is modified"
1348 1343 # check subrepos first
1349 1344 for s in sorted(self.substate):
1350 1345 if self.sub(s).dirty(missing=missing):
1351 1346 return True
1352 1347 # check current working dir
1353 1348 return ((merge and self.p2()) or
1354 1349 (branch and self.branch() != self.p1().branch()) or
1355 1350 self.modified() or self.added() or self.removed() or
1356 1351 (missing and self.deleted()))
1357 1352
1358 1353 def add(self, list, prefix=""):
1359 1354 with self._repo.wlock():
1360 1355 ui, ds = self._repo.ui, self._repo.dirstate
1361 1356 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1362 1357 rejected = []
1363 1358 lstat = self._repo.wvfs.lstat
1364 1359 for f in list:
1365 1360 # ds.pathto() returns an absolute file when this is invoked from
1366 1361 # the keyword extension. That gets flagged as non-portable on
1367 1362 # Windows, since it contains the drive letter and colon.
1368 1363 scmutil.checkportable(ui, os.path.join(prefix, f))
1369 1364 try:
1370 1365 st = lstat(f)
1371 1366 except OSError:
1372 1367 ui.warn(_("%s does not exist!\n") % uipath(f))
1373 1368 rejected.append(f)
1374 1369 continue
1375 1370 limit = ui.configbytes('ui', 'large-file-limit')
1376 1371 if limit != 0 and st.st_size > limit:
1377 1372 ui.warn(_("%s: up to %d MB of RAM may be required "
1378 1373 "to manage this file\n"
1379 1374 "(use 'hg revert %s' to cancel the "
1380 1375 "pending addition)\n")
1381 1376 % (f, 3 * st.st_size // 1000000, uipath(f)))
1382 1377 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1383 1378 ui.warn(_("%s not added: only files and symlinks "
1384 1379 "supported currently\n") % uipath(f))
1385 1380 rejected.append(f)
1386 1381 elif ds[f] in 'amn':
1387 1382 ui.warn(_("%s already tracked!\n") % uipath(f))
1388 1383 elif ds[f] == 'r':
1389 1384 ds.normallookup(f)
1390 1385 else:
1391 1386 ds.add(f)
1392 1387 return rejected
1393 1388
1394 1389 def forget(self, files, prefix=""):
1395 1390 with self._repo.wlock():
1396 1391 ds = self._repo.dirstate
1397 1392 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1398 1393 rejected = []
1399 1394 for f in files:
1400 1395 if f not in ds:
1401 1396 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1402 1397 rejected.append(f)
1403 1398 elif ds[f] != 'a':
1404 1399 ds.remove(f)
1405 1400 else:
1406 1401 ds.drop(f)
1407 1402 return rejected
1408 1403
1409 1404 def copy(self, source, dest):
1410 1405 try:
1411 1406 st = self._repo.wvfs.lstat(dest)
1412 1407 except OSError as err:
1413 1408 if err.errno != errno.ENOENT:
1414 1409 raise
1415 1410 self._repo.ui.warn(_("%s does not exist!\n")
1416 1411 % self._repo.dirstate.pathto(dest))
1417 1412 return
1418 1413 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1419 1414 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1420 1415 "symbolic link\n")
1421 1416 % self._repo.dirstate.pathto(dest))
1422 1417 else:
1423 1418 with self._repo.wlock():
1424 1419 ds = self._repo.dirstate
1425 1420 if ds[dest] in '?':
1426 1421 ds.add(dest)
1427 1422 elif ds[dest] in 'r':
1428 1423 ds.normallookup(dest)
1429 1424 ds.copy(source, dest)
1430 1425
1431 1426 def match(self, pats=None, include=None, exclude=None, default='glob',
1432 1427 listsubrepos=False, badfn=None):
1433 1428 r = self._repo
1434 1429
1435 1430 # Only a case insensitive filesystem needs magic to translate user input
1436 1431 # to actual case in the filesystem.
1437 1432 icasefs = not util.fscasesensitive(r.root)
1438 1433 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1439 1434 default, auditor=r.auditor, ctx=self,
1440 1435 listsubrepos=listsubrepos, badfn=badfn,
1441 1436 icasefs=icasefs)
1442 1437
1443 1438 def _filtersuspectsymlink(self, files):
1444 1439 if not files or self._repo.dirstate._checklink:
1445 1440 return files
1446 1441
1447 1442 # Symlink placeholders may get non-symlink-like contents
1448 1443 # via user error or dereferencing by NFS or Samba servers,
1449 1444 # so we filter out any placeholders that don't look like a
1450 1445 # symlink
1451 1446 sane = []
1452 1447 for f in files:
1453 1448 if self.flags(f) == 'l':
1454 1449 d = self[f].data()
1455 1450 if (d == '' or len(d) >= 1024 or '\n' in d
1456 1451 or stringutil.binary(d)):
1457 1452 self._repo.ui.debug('ignoring suspect symlink placeholder'
1458 1453 ' "%s"\n' % f)
1459 1454 continue
1460 1455 sane.append(f)
1461 1456 return sane
1462 1457
1463 1458 def _checklookup(self, files):
1464 1459 # check for any possibly clean files
1465 1460 if not files:
1466 1461 return [], [], []
1467 1462
1468 1463 modified = []
1469 1464 deleted = []
1470 1465 fixup = []
1471 1466 pctx = self._parents[0]
1472 1467 # do a full compare of any files that might have changed
1473 1468 for f in sorted(files):
1474 1469 try:
1475 1470 # This will return True for a file that got replaced by a
1476 1471 # directory in the interim, but fixing that is pretty hard.
1477 1472 if (f not in pctx or self.flags(f) != pctx.flags(f)
1478 1473 or pctx[f].cmp(self[f])):
1479 1474 modified.append(f)
1480 1475 else:
1481 1476 fixup.append(f)
1482 1477 except (IOError, OSError):
1483 1478 # A file became inaccessible in between? Mark it as deleted,
1484 1479 # matching dirstate behavior (issue5584).
1485 1480 # The dirstate has more complex behavior around whether a
1486 1481 # missing file matches a directory, etc, but we don't need to
1487 1482 # bother with that: if f has made it to this point, we're sure
1488 1483 # it's in the dirstate.
1489 1484 deleted.append(f)
1490 1485
1491 1486 return modified, deleted, fixup
1492 1487
1493 1488 def _poststatusfixup(self, status, fixup):
1494 1489 """update dirstate for files that are actually clean"""
1495 1490 poststatus = self._repo.postdsstatus()
1496 1491 if fixup or poststatus:
1497 1492 try:
1498 1493 oldid = self._repo.dirstate.identity()
1499 1494
1500 1495 # updating the dirstate is optional
1501 1496 # so we don't wait on the lock
1502 1497 # wlock can invalidate the dirstate, so cache normal _after_
1503 1498 # taking the lock
1504 1499 with self._repo.wlock(False):
1505 1500 if self._repo.dirstate.identity() == oldid:
1506 1501 if fixup:
1507 1502 normal = self._repo.dirstate.normal
1508 1503 for f in fixup:
1509 1504 normal(f)
1510 1505 # write changes out explicitly, because nesting
1511 1506 # wlock at runtime may prevent 'wlock.release()'
1512 1507 # after this block from doing so for subsequent
1513 1508 # changing files
1514 1509 tr = self._repo.currenttransaction()
1515 1510 self._repo.dirstate.write(tr)
1516 1511
1517 1512 if poststatus:
1518 1513 for ps in poststatus:
1519 1514 ps(self, status)
1520 1515 else:
1521 1516 # in this case, writing changes out breaks
1522 1517 # consistency, because .hg/dirstate was
1523 1518 # already changed simultaneously after last
1524 1519 # caching (see also issue5584 for detail)
1525 1520 self._repo.ui.debug('skip updating dirstate: '
1526 1521 'identity mismatch\n')
1527 1522 except error.LockError:
1528 1523 pass
1529 1524 finally:
1530 1525 # Even if the wlock couldn't be grabbed, clear out the list.
1531 1526 self._repo.clearpostdsstatus()
1532 1527
1533 1528 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1534 1529 '''Gets the status from the dirstate -- internal use only.'''
1535 1530 subrepos = []
1536 1531 if '.hgsub' in self:
1537 1532 subrepos = sorted(self.substate)
1538 1533 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1539 1534 clean=clean, unknown=unknown)
1540 1535
1541 1536 # check for any possibly clean files
1542 1537 fixup = []
1543 1538 if cmp:
1544 1539 modified2, deleted2, fixup = self._checklookup(cmp)
1545 1540 s.modified.extend(modified2)
1546 1541 s.deleted.extend(deleted2)
1547 1542
1548 1543 if fixup and clean:
1549 1544 s.clean.extend(fixup)
1550 1545
1551 1546 self._poststatusfixup(s, fixup)
1552 1547
1553 1548 if match.always():
1554 1549 # cache for performance
1555 1550 if s.unknown or s.ignored or s.clean:
1556 1551 # "_status" is cached with list*=False in the normal route
1557 1552 self._status = scmutil.status(s.modified, s.added, s.removed,
1558 1553 s.deleted, [], [], [])
1559 1554 else:
1560 1555 self._status = s
1561 1556
1562 1557 return s
1563 1558
1564 1559 @propertycache
1565 1560 def _copies(self):
1566 1561 p1copies = {}
1567 1562 p2copies = {}
1568 1563 parents = self._repo.dirstate.parents()
1569 1564 p1manifest = self._repo[parents[0]].manifest()
1570 1565 p2manifest = self._repo[parents[1]].manifest()
1571 1566 narrowmatch = self._repo.narrowmatch()
1572 1567 for dst, src in self._repo.dirstate.copies().items():
1573 1568 if not narrowmatch(dst):
1574 1569 continue
1575 1570 if src in p1manifest:
1576 1571 p1copies[dst] = src
1577 1572 elif src in p2manifest:
1578 1573 p2copies[dst] = src
1579 1574 return p1copies, p2copies
1580 1575
1581 1576 @propertycache
1582 1577 def _manifest(self):
1583 1578 """generate a manifest corresponding to the values in self._status
1584 1579
1585 1580 This reuses the file nodeids from the parent, but we use special node
1586 1581 identifiers for added and modified files. This is used by manifest
1587 1582 merge to see that files are different and by the update logic to avoid
1588 1583 deleting newly added files.
1589 1584 """
1590 1585 return self._buildstatusmanifest(self._status)
1591 1586
1592 1587 def _buildstatusmanifest(self, status):
1593 1588 """Builds a manifest that includes the given status results."""
1594 1589 parents = self.parents()
1595 1590
1596 1591 man = parents[0].manifest().copy()
1597 1592
1598 1593 ff = self._flagfunc
1599 1594 for i, l in ((addednodeid, status.added),
1600 1595 (modifiednodeid, status.modified)):
1601 1596 for f in l:
1602 1597 man[f] = i
1603 1598 try:
1604 1599 man.setflag(f, ff(f))
1605 1600 except OSError:
1606 1601 pass
1607 1602
1608 1603 for f in status.deleted + status.removed:
1609 1604 if f in man:
1610 1605 del man[f]
1611 1606
1612 1607 return man
1613 1608
1614 1609 def _buildstatus(self, other, s, match, listignored, listclean,
1615 1610 listunknown):
1616 1611 """build a status with respect to another context
1617 1612
1618 1613 This includes logic for maintaining the fast path of status when
1619 1614 comparing the working directory against its parent: building a new
1620 1615 manifest is skipped unless self (the working directory) is being
1621 1616 compared against something other than its parent (repo['.']).
1622 1617 """
1623 1618 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1624 1619 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1625 1620 # might have accidentally ended up with the entire contents of the file
1626 1621 # they are supposed to be linking to.
1627 1622 s.modified[:] = self._filtersuspectsymlink(s.modified)
1628 1623 if other != self._repo['.']:
1629 1624 s = super(workingctx, self)._buildstatus(other, s, match,
1630 1625 listignored, listclean,
1631 1626 listunknown)
1632 1627 return s
1633 1628
1634 1629 def _matchstatus(self, other, match):
1635 1630 """override the match method with a filter for directory patterns
1636 1631
1637 1632 We use inheritance to customize the match.bad method only in cases of
1638 1633 workingctx since it belongs only to the working directory when
1639 1634 comparing against the parent changeset.
1640 1635
1641 1636 If we aren't comparing against the working directory's parent, then we
1642 1637 just use the default match object sent to us.
1643 1638 """
1644 1639 if other != self._repo['.']:
1645 1640 def bad(f, msg):
1646 1641 # 'f' may be a directory pattern from 'match.files()',
1647 1642 # so 'f not in ctx1' is not enough
1648 1643 if f not in other and not other.hasdir(f):
1649 1644 self._repo.ui.warn('%s: %s\n' %
1650 1645 (self._repo.dirstate.pathto(f), msg))
1651 1646 match.bad = bad
1652 1647 return match
1653 1648
1654 1649 def walk(self, match):
1655 1650 '''Generates matching file names.'''
1656 1651 return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
1657 1652 subrepos=sorted(self.substate),
1658 1653 unknown=True, ignored=False))
1659 1654
1660 1655 def matches(self, match):
1661 1656 match = self._repo.narrowmatch(match)
1662 1657 ds = self._repo.dirstate
1663 1658 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1664 1659
1665 1660 def markcommitted(self, node):
1666 1661 with self._repo.dirstate.parentchange():
1667 1662 for f in self.modified() + self.added():
1668 1663 self._repo.dirstate.normal(f)
1669 1664 for f in self.removed():
1670 1665 self._repo.dirstate.drop(f)
1671 1666 self._repo.dirstate.setparents(node)
1672 1667
1673 1668 # write changes out explicitly, because nesting wlock at
1674 1669 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1675 1670 # from immediately doing so for subsequent changing files
1676 1671 self._repo.dirstate.write(self._repo.currenttransaction())
1677 1672
1678 1673 sparse.aftercommit(self._repo, node)
1679 1674
1680 1675 class committablefilectx(basefilectx):
1681 1676 """A committablefilectx provides common functionality for a file context
1682 1677 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1683 1678 def __init__(self, repo, path, filelog=None, ctx=None):
1684 1679 self._repo = repo
1685 1680 self._path = path
1686 1681 self._changeid = None
1687 1682 self._filerev = self._filenode = None
1688 1683
1689 1684 if filelog is not None:
1690 1685 self._filelog = filelog
1691 1686 if ctx:
1692 1687 self._changectx = ctx
1693 1688
1694 1689 def __nonzero__(self):
1695 1690 return True
1696 1691
1697 1692 __bool__ = __nonzero__
1698 1693
1699 1694 def linkrev(self):
1700 1695 # linked to self._changectx no matter if file is modified or not
1701 1696 return self.rev()
1702 1697
1703 1698 def renamed(self):
1704 1699 path = self.copysource()
1705 1700 if not path:
1706 1701 return None
1707 1702 return path, self._changectx._parents[0]._manifest.get(path, nullid)
1708 1703
1709 1704 def parents(self):
1710 1705 '''return parent filectxs, following copies if necessary'''
1711 1706 def filenode(ctx, path):
1712 1707 return ctx._manifest.get(path, nullid)
1713 1708
1714 1709 path = self._path
1715 1710 fl = self._filelog
1716 1711 pcl = self._changectx._parents
1717 1712 renamed = self.renamed()
1718 1713
1719 1714 if renamed:
1720 1715 pl = [renamed + (None,)]
1721 1716 else:
1722 1717 pl = [(path, filenode(pcl[0], path), fl)]
1723 1718
1724 1719 for pc in pcl[1:]:
1725 1720 pl.append((path, filenode(pc, path), fl))
1726 1721
1727 1722 return [self._parentfilectx(p, fileid=n, filelog=l)
1728 1723 for p, n, l in pl if n != nullid]
1729 1724
1730 1725 def children(self):
1731 1726 return []
1732 1727
1733 1728 class workingfilectx(committablefilectx):
1734 1729 """A workingfilectx object makes access to data related to a particular
1735 1730 file in the working directory convenient."""
1736 1731 def __init__(self, repo, path, filelog=None, workingctx=None):
1737 1732 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1738 1733
1739 1734 @propertycache
1740 1735 def _changectx(self):
1741 1736 return workingctx(self._repo)
1742 1737
1743 1738 def data(self):
1744 1739 return self._repo.wread(self._path)
1745 1740 def copysource(self):
1746 1741 return self._repo.dirstate.copied(self._path)
1747 1742
1748 1743 def size(self):
1749 1744 return self._repo.wvfs.lstat(self._path).st_size
1750 1745 def lstat(self):
1751 1746 return self._repo.wvfs.lstat(self._path)
1752 1747 def date(self):
1753 1748 t, tz = self._changectx.date()
1754 1749 try:
1755 1750 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1756 1751 except OSError as err:
1757 1752 if err.errno != errno.ENOENT:
1758 1753 raise
1759 1754 return (t, tz)
1760 1755
1761 1756 def exists(self):
1762 1757 return self._repo.wvfs.exists(self._path)
1763 1758
1764 1759 def lexists(self):
1765 1760 return self._repo.wvfs.lexists(self._path)
1766 1761
1767 1762 def audit(self):
1768 1763 return self._repo.wvfs.audit(self._path)
1769 1764
1770 1765 def cmp(self, fctx):
1771 1766 """compare with other file context
1772 1767
1773 1768 returns True if different than fctx.
1774 1769 """
1775 1770 # fctx should be a filectx (not a workingfilectx)
1776 1771 # invert comparison to reuse the same code path
1777 1772 return fctx.cmp(self)
1778 1773
1779 1774 def remove(self, ignoremissing=False):
1780 1775 """wraps unlink for a repo's working directory"""
1781 1776 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1782 1777 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1783 1778 rmdir=rmdir)
1784 1779
1785 1780 def write(self, data, flags, backgroundclose=False, **kwargs):
1786 1781 """wraps repo.wwrite"""
1787 1782 return self._repo.wwrite(self._path, data, flags,
1788 1783 backgroundclose=backgroundclose,
1789 1784 **kwargs)
1790 1785
1791 1786 def markcopied(self, src):
1792 1787 """marks this file a copy of `src`"""
1793 1788 self._repo.dirstate.copy(src, self._path)
1794 1789
1795 1790 def clearunknown(self):
1796 1791 """Removes conflicting items in the working directory so that
1797 1792 ``write()`` can be called successfully.
1798 1793 """
1799 1794 wvfs = self._repo.wvfs
1800 1795 f = self._path
1801 1796 wvfs.audit(f)
1802 1797 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1803 1798 # remove files under the directory as they should already be
1804 1799 # warned and backed up
1805 1800 if wvfs.isdir(f) and not wvfs.islink(f):
1806 1801 wvfs.rmtree(f, forcibly=True)
1807 1802 for p in reversed(list(util.finddirs(f))):
1808 1803 if wvfs.isfileorlink(p):
1809 1804 wvfs.unlink(p)
1810 1805 break
1811 1806 else:
1812 1807 # don't remove files if path conflicts are not processed
1813 1808 if wvfs.isdir(f) and not wvfs.islink(f):
1814 1809 wvfs.removedirs(f)
1815 1810
1816 1811 def setflags(self, l, x):
1817 1812 self._repo.wvfs.setflags(self._path, l, x)
1818 1813
1819 1814 class overlayworkingctx(committablectx):
1820 1815 """Wraps another mutable context with a write-back cache that can be
1821 1816 converted into a commit context.
1822 1817
1823 1818 self._cache[path] maps to a dict with keys: {
1824 1819 'exists': bool?
1825 1820 'date': date?
1826 1821 'data': str?
1827 1822 'flags': str?
1828 1823 'copied': str? (path or None)
1829 1824 }
1830 1825 If `exists` is True, `flags` and `date` must be non-None. If `exists`
1831 1826 is False, the file was deleted.
1832 1827 """
1833 1828
1834 1829 def __init__(self, repo):
1835 1830 super(overlayworkingctx, self).__init__(repo)
1836 1831 self.clean()
1837 1832
1838 1833 def setbase(self, wrappedctx):
1839 1834 self._wrappedctx = wrappedctx
1840 1835 self._parents = [wrappedctx]
1841 1836 # Drop old manifest cache as it is now out of date.
1842 1837 # This is necessary when, e.g., rebasing several nodes with one
1843 1838 # ``overlayworkingctx`` (e.g. with --collapse).
1844 1839 util.clearcachedproperty(self, '_manifest')
1845 1840
1846 1841 def data(self, path):
1847 1842 if self.isdirty(path):
1848 1843 if self._cache[path]['exists']:
1849 1844 if self._cache[path]['data'] is not None:
1850 1845 return self._cache[path]['data']
1851 1846 else:
1852 1847 # Must fall back here, too, because we only set flags.
1853 1848 return self._wrappedctx[path].data()
1854 1849 else:
1855 1850 raise error.ProgrammingError("No such file or directory: %s" %
1856 1851 path)
1857 1852 else:
1858 1853 return self._wrappedctx[path].data()
1859 1854
1860 1855 @propertycache
1861 1856 def _manifest(self):
1862 1857 parents = self.parents()
1863 1858 man = parents[0].manifest().copy()
1864 1859
1865 1860 flag = self._flagfunc
1866 1861 for path in self.added():
1867 1862 man[path] = addednodeid
1868 1863 man.setflag(path, flag(path))
1869 1864 for path in self.modified():
1870 1865 man[path] = modifiednodeid
1871 1866 man.setflag(path, flag(path))
1872 1867 for path in self.removed():
1873 1868 del man[path]
1874 1869 return man
1875 1870
1876 1871 @propertycache
1877 1872 def _flagfunc(self):
1878 1873 def f(path):
1879 1874 return self._cache[path]['flags']
1880 1875 return f
1881 1876
1882 1877 def files(self):
1883 1878 return sorted(self.added() + self.modified() + self.removed())
1884 1879
1885 1880 def modified(self):
1886 1881 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1887 1882 self._existsinparent(f)]
1888 1883
1889 1884 def added(self):
1890 1885 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1891 1886 not self._existsinparent(f)]
1892 1887
1893 1888 def removed(self):
1894 1889 return [f for f in self._cache.keys() if
1895 1890 not self._cache[f]['exists'] and self._existsinparent(f)]
1896 1891
1897 1892 def p1copies(self):
1898 1893 copies = self._wrappedctx.p1copies().copy()
1899 1894 narrowmatch = self._repo.narrowmatch()
1900 1895 for f in self._cache.keys():
1901 1896 if not narrowmatch(f):
1902 1897 continue
1903 1898 copies.pop(f, None) # delete if it exists
1904 1899 source = self._cache[f]['copied']
1905 1900 if source:
1906 1901 copies[f] = source
1907 1902 return copies
1908 1903
1909 1904 def p2copies(self):
1910 1905 copies = self._wrappedctx.p2copies().copy()
1911 1906 narrowmatch = self._repo.narrowmatch()
1912 1907 for f in self._cache.keys():
1913 1908 if not narrowmatch(f):
1914 1909 continue
1915 1910 copies.pop(f, None) # delete if it exists
1916 1911 source = self._cache[f]['copied']
1917 1912 if source:
1918 1913 copies[f] = source
1919 1914 return copies
1920 1915
1921 1916 def isinmemory(self):
1922 1917 return True
1923 1918
1924 1919 def filedate(self, path):
1925 1920 if self.isdirty(path):
1926 1921 return self._cache[path]['date']
1927 1922 else:
1928 1923 return self._wrappedctx[path].date()
1929 1924
1930 1925 def markcopied(self, path, origin):
1931 1926 self._markdirty(path, exists=True, date=self.filedate(path),
1932 1927 flags=self.flags(path), copied=origin)
1933 1928
1934 1929 def copydata(self, path):
1935 1930 if self.isdirty(path):
1936 1931 return self._cache[path]['copied']
1937 1932 else:
1938 1933 return None
1939 1934
1940 1935 def flags(self, path):
1941 1936 if self.isdirty(path):
1942 1937 if self._cache[path]['exists']:
1943 1938 return self._cache[path]['flags']
1944 1939 else:
1945 1940 raise error.ProgrammingError("No such file or directory: %s" %
1946 1941 path)
1947 1942 else:
1948 1943 return self._wrappedctx[path].flags()
1949 1944
1950 1945 def __contains__(self, key):
1951 1946 if key in self._cache:
1952 1947 return self._cache[key]['exists']
1953 1948 return key in self.p1()
1954 1949
1955 1950 def _existsinparent(self, path):
1956 1951 try:
1957 1952 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
1958 1953 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1959 1954 # with an ``exists()`` function.
1960 1955 self._wrappedctx[path]
1961 1956 return True
1962 1957 except error.ManifestLookupError:
1963 1958 return False
1964 1959
1965 1960 def _auditconflicts(self, path):
1966 1961 """Replicates conflict checks done by wvfs.write().
1967 1962
1968 1963 Since we never write to the filesystem and never call `applyupdates` in
1969 1964 IMM, we'll never check that a path is actually writable -- e.g., because
1970 1965 it adds `a/foo`, but `a` is actually a file in the other commit.
1971 1966 """
1972 1967 def fail(path, component):
1973 1968 # p1() is the base and we're receiving "writes" for p2()'s
1974 1969 # files.
1975 1970 if 'l' in self.p1()[component].flags():
1976 1971 raise error.Abort("error: %s conflicts with symlink %s "
1977 1972 "in %d." % (path, component,
1978 1973 self.p1().rev()))
1979 1974 else:
1980 1975 raise error.Abort("error: '%s' conflicts with file '%s' in "
1981 1976 "%d." % (path, component,
1982 1977 self.p1().rev()))
1983 1978
1984 1979 # Test that each new directory to be created to write this path from p2
1985 1980 # is not a file in p1.
1986 1981 components = path.split('/')
1987 1982 for i in pycompat.xrange(len(components)):
1988 1983 component = "/".join(components[0:i])
1989 1984 if component in self:
1990 1985 fail(path, component)
1991 1986
1992 1987 # Test the other direction -- that this path from p2 isn't a directory
1993 1988 # in p1 (test that p1 doesn't have any paths matching `path/*`).
1994 1989 match = self.match([path], default=b'path')
1995 1990 matches = self.p1().manifest().matches(match)
1996 1991 mfiles = matches.keys()
1997 1992 if len(mfiles) > 0:
1998 1993 if len(mfiles) == 1 and mfiles[0] == path:
1999 1994 return
2000 1995 # omit the files which are deleted in current IMM wctx
2001 1996 mfiles = [m for m in mfiles if m in self]
2002 1997 if not mfiles:
2003 1998 return
2004 1999 raise error.Abort("error: file '%s' cannot be written because "
2005 2000 " '%s/' is a directory in %s (containing %d "
2006 2001 "entries: %s)"
2007 2002 % (path, path, self.p1(), len(mfiles),
2008 2003 ', '.join(mfiles)))
2009 2004
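The two manifest probes above amount to prefix checks over '/'-separated components: no ancestor of the new path may be a file, and the new path may not already be a directory. A self-contained sketch of that logic, using a set of paths instead of manifests (names are illustrative):

def audit_path(path, existing_files):
    """Raise if writing `path` would collide with an existing file or dir."""
    parts = path.split('/')
    # 1) no ancestor of `path` may already exist as a file
    for i in range(1, len(parts)):
        prefix = '/'.join(parts[:i])
        if prefix in existing_files:
            raise ValueError('%s conflicts with file %s' % (path, prefix))
    # 2) `path` itself may not already exist as a directory
    inside = [f for f in existing_files if f.startswith(path + '/')]
    if inside:
        raise ValueError('%s is a directory (contains: %s)'
                         % (path, ', '.join(inside)))

audit_path('a/new.txt', {'a/old.txt'})          # passes
# audit_path('a/b', {'a/b/c'}) raises: a/b is a directory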
2010 2005 def write(self, path, data, flags='', **kwargs):
2011 2006 if data is None:
2012 2007 raise error.ProgrammingError("data must be non-None")
2013 2008 self._auditconflicts(path)
2014 2009 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
2015 2010 flags=flags)
2016 2011
2017 2012 def setflags(self, path, l, x):
2018 2013 flag = ''
2019 2014 if l:
2020 2015 flag = 'l'
2021 2016 elif x:
2022 2017 flag = 'x'
2023 2018 self._markdirty(path, exists=True, date=dateutil.makedate(),
2024 2019 flags=flag)
2025 2020
2026 2021 def remove(self, path):
2027 2022 self._markdirty(path, exists=False)
2028 2023
2029 2024 def exists(self, path):
2030 2025 """exists behaves like `lexists`, but needs to follow symlinks and
2031 2026 return False if they are broken.
2032 2027 """
2033 2028 if self.isdirty(path):
2034 2029 # If this path exists and is a symlink, "follow" it by calling
2035 2030 # exists on the destination path.
2036 2031 if (self._cache[path]['exists'] and
2037 2032 'l' in self._cache[path]['flags']):
2038 2033 return self.exists(self._cache[path]['data'].strip())
2039 2034 else:
2040 2035 return self._cache[path]['exists']
2041 2036
2042 2037 return self._existsinparent(path)
2043 2038
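The exists()/lexists() split mirrors the os.path pair that this method reimplements for the in-memory cache: lexists() reports the link entry itself, exists() chases the target. A quick filesystem illustration (POSIX; os.symlink may need extra privileges on Windows):

import os
import tempfile

d = tempfile.mkdtemp()
link = os.path.join(d, 'dangling')
os.symlink(os.path.join(d, 'missing-target'), link)

print(os.path.lexists(link))   # True  -- the link entry exists
print(os.path.exists(link))    # False -- its target is broken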
2044 2039 def lexists(self, path):
2045 2040 """lexists returns True if the path exists"""
2046 2041 if self.isdirty(path):
2047 2042 return self._cache[path]['exists']
2048 2043
2049 2044 return self._existsinparent(path)
2050 2045
2051 2046 def size(self, path):
2052 2047 if self.isdirty(path):
2053 2048 if self._cache[path]['exists']:
2054 2049 return len(self._cache[path]['data'])
2055 2050 else:
2056 2051 raise error.ProgrammingError("No such file or directory: %s" %
2057 2052 path)
2058 2053 return self._wrappedctx[path].size()
2059 2054
2060 2055 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
2061 2056 user=None, editor=None):
2062 2057 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2063 2058 committed.
2064 2059
2065 2060 ``text`` is the commit message.
2066 2061 ``parents`` (optional) are rev numbers.
2067 2062 """
2068 2063 # Default parents to the wrapped contexts' if not passed.
2069 2064 if parents is None:
2070 2065 parents = self._wrappedctx.parents()
2071 2066 if len(parents) == 1:
2072 2067 parents = (parents[0], None)
2073 2068
2074 2069 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2075 2070 if parents[1] is None:
2076 2071 parents = (self._repo[parents[0]], None)
2077 2072 else:
2078 2073 parents = (self._repo[parents[0]], self._repo[parents[1]])
2079 2074
2080 2075 files = self.files()
2081 2076 def getfile(repo, memctx, path):
2082 2077 if self._cache[path]['exists']:
2083 2078 return memfilectx(repo, memctx, path,
2084 2079 self._cache[path]['data'],
2085 2080 'l' in self._cache[path]['flags'],
2086 2081 'x' in self._cache[path]['flags'],
2087 2082 self._cache[path]['copied'])
2088 2083 else:
2089 2084 # Returning None, but including the path in `files`, is
2090 2085 # necessary for memctx to register a deletion.
2091 2086 return None
2092 2087 return memctx(self._repo, parents, text, files, getfile, date=date,
2093 2088 extra=extra, user=user, branch=branch, editor=editor)
2094 2089
2095 2090 def isdirty(self, path):
2096 2091 return path in self._cache
2097 2092
2098 2093 def isempty(self):
2099 2094 # We need to discard any keys that are actually clean before the empty
2100 2095 # commit check.
2101 2096 self._compact()
2102 2097 return len(self._cache) == 0
2103 2098
2104 2099 def clean(self):
2105 2100 self._cache = {}
2106 2101
2107 2102 def _compact(self):
2108 2103 """Removes keys from the cache that are actually clean, by comparing
2109 2104 them with the underlying context.
2110 2105
2111 2106 This can occur during the merge process, e.g. by passing --tool :local
2112 2107 to resolve a conflict.
2113 2108 """
2114 2109 keys = []
2115 2110 # This won't be perfect, but can help performance significantly when
2116 2111 # using things like remotefilelog.
2117 2112 scmutil.prefetchfiles(
2118 2113 self.repo(), [self.p1().rev()],
2119 2114 scmutil.matchfiles(self.repo(), self._cache.keys()))
2120 2115
2121 2116 for path in self._cache.keys():
2122 2117 cache = self._cache[path]
2123 2118 try:
2124 2119 underlying = self._wrappedctx[path]
2125 2120 if (underlying.data() == cache['data'] and
2126 2121 underlying.flags() == cache['flags']):
2127 2122 keys.append(path)
2128 2123 except error.ManifestLookupError:
2129 2124 # Path not in the underlying manifest (created).
2130 2125 continue
2131 2126
2132 2127 for path in keys:
2133 2128 del self._cache[path]
2134 2129 return keys
2135 2130
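Compaction simply drops cache entries that turn out to match the underlying context. The same pattern in miniature, with a dict of (data, flags) tuples standing in for the wrapped context (illustrative only):

def compact(cache, baseline):
    """Remove cache entries whose data and flags equal the baseline's."""
    clean = [path for path, entry in cache.items()
             if path in baseline
             and baseline[path] == (entry['data'], entry['flags'])]
    for path in clean:
        del cache[path]
    return clean

cache = {'a': {'data': b'x', 'flags': ''},
         'b': {'data': b'y', 'flags': ''}}
baseline = {'a': (b'x', '')}        # 'a' is actually unchanged
print(compact(cache, baseline))     # ['a']; only 'b' stays dirty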
2136 2131 def _markdirty(self, path, exists, data=None, date=None, flags='',
2137 2132 copied=None):
2138 2133 # data not provided, let's see if we already have some; if not, let's
2139 2134 # grab it from our underlying context, so that we always have data if
2140 2135 # the file is marked as existing.
2141 2136 if exists and data is None:
2142 2137 oldentry = self._cache.get(path) or {}
2143 2138 data = oldentry.get('data')
2144 2139 if data is None:
2145 2140 data = self._wrappedctx[path].data()
2146 2141
2147 2142 self._cache[path] = {
2148 2143 'exists': exists,
2149 2144 'data': data,
2150 2145 'date': date,
2151 2146 'flags': flags,
2152 2147 'copied': copied,
2153 2148 }
2154 2149
2155 2150 def filectx(self, path, filelog=None):
2156 2151 return overlayworkingfilectx(self._repo, path, parent=self,
2157 2152 filelog=filelog)
2158 2153
2159 2154 class overlayworkingfilectx(committablefilectx):
2160 2155 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2161 2156 cache, which can be flushed through later by calling ``flush()``."""
2162 2157
2163 2158 def __init__(self, repo, path, filelog=None, parent=None):
2164 2159 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2165 2160 parent)
2166 2161 self._repo = repo
2167 2162 self._parent = parent
2168 2163 self._path = path
2169 2164
2170 2165 def cmp(self, fctx):
2171 2166 return self.data() != fctx.data()
2172 2167
2173 2168 def changectx(self):
2174 2169 return self._parent
2175 2170
2176 2171 def data(self):
2177 2172 return self._parent.data(self._path)
2178 2173
2179 2174 def date(self):
2180 2175 return self._parent.filedate(self._path)
2181 2176
2182 2177 def exists(self):
2183 2178 return self.lexists()
2184 2179
2185 2180 def lexists(self):
2186 2181 return self._parent.exists(self._path)
2187 2182
2188 2183 def copysource(self):
2189 2184 return self._parent.copydata(self._path)
2190 2185
2191 2186 def size(self):
2192 2187 return self._parent.size(self._path)
2193 2188
2194 2189 def markcopied(self, origin):
2195 2190 self._parent.markcopied(self._path, origin)
2196 2191
2197 2192 def audit(self):
2198 2193 pass
2199 2194
2200 2195 def flags(self):
2201 2196 return self._parent.flags(self._path)
2202 2197
2203 2198 def setflags(self, islink, isexec):
2204 2199 return self._parent.setflags(self._path, islink, isexec)
2205 2200
2206 2201 def write(self, data, flags, backgroundclose=False, **kwargs):
2207 2202 return self._parent.write(self._path, data, flags, **kwargs)
2208 2203
2209 2204 def remove(self, ignoremissing=False):
2210 2205 return self._parent.remove(self._path)
2211 2206
2212 2207 def clearunknown(self):
2213 2208 pass
2214 2209
2215 2210 class workingcommitctx(workingctx):
2216 2211 """A workingcommitctx object makes access to data related to
2217 2212 the revision being committed convenient.
2218 2213
2219 2214 This hides changes in the working directory, if they aren't
2220 2215 committed in this context.
2221 2216 """
2222 2217 def __init__(self, repo, changes,
2223 2218 text="", user=None, date=None, extra=None):
2224 2219 super(workingcommitctx, self).__init__(repo, text, user, date, extra,
2225 2220 changes)
2226 2221
2227 2222 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2228 2223 """Return matched files only in ``self._status``
2229 2224
2230 2225 Uncommitted files appear "clean" via this context, even if
2231 2226 they aren't actually so in the working directory.
2232 2227 """
2233 2228 if clean:
2234 2229 clean = [f for f in self._manifest if f not in self._changedset]
2235 2230 else:
2236 2231 clean = []
2237 2232 return scmutil.status([f for f in self._status.modified if match(f)],
2238 2233 [f for f in self._status.added if match(f)],
2239 2234 [f for f in self._status.removed if match(f)],
2240 2235 [], [], [], clean)
2241 2236
2242 2237 @propertycache
2243 2238 def _changedset(self):
2244 2239 """Return the set of files changed in this context
2245 2240 """
2246 2241 changed = set(self._status.modified)
2247 2242 changed.update(self._status.added)
2248 2243 changed.update(self._status.removed)
2249 2244 return changed
2250 2245
2251 2246 def makecachingfilectxfn(func):
2252 2247 """Create a filectxfn that caches based on the path.
2253 2248
2254 2249 We can't use util.cachefunc because it uses all arguments as the cache
2255 2250 key and this creates a cycle since the arguments include the repo and
2256 2251 memctx.
2257 2252 """
2258 2253 cache = {}
2259 2254
2260 2255 def getfilectx(repo, memctx, path):
2261 2256 if path not in cache:
2262 2257 cache[path] = func(repo, memctx, path)
2263 2258 return cache[path]
2264 2259
2265 2260 return getfilectx
2266 2261
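makecachingfilectxfn is a path-keyed memoizer: only the final argument is used as the cache key, so the repo and memctx arguments never end up in a key (avoiding the reference cycle the docstring mentions). A generic sketch of the pattern:

def memoize_on_last_arg(func):
    """Cache results keyed on the final positional argument only."""
    cache = {}
    def wrapper(*args):
        key = args[-1]
        if key not in cache:
            cache[key] = func(*args)
        return cache[key]
    return wrapper

calls = []

@memoize_on_last_arg
def load(repo, memctx, path):
    calls.append(path)
    return 'contents of %s' % path

load(None, None, 'a.txt')
load(None, None, 'a.txt')   # second call served from the cache
print(calls)                # ['a.txt'] -- computed once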
2267 2262 def memfilefromctx(ctx):
2268 2263 """Given a context return a memfilectx for ctx[path]
2269 2264
2270 2265 This is a convenience method for building a memctx based on another
2271 2266 context.
2272 2267 """
2273 2268 def getfilectx(repo, memctx, path):
2274 2269 fctx = ctx[path]
2275 2270 copysource = fctx.copysource()
2276 2271 return memfilectx(repo, memctx, path, fctx.data(),
2277 2272 islink=fctx.islink(), isexec=fctx.isexec(),
2278 2273 copysource=copysource)
2279 2274
2280 2275 return getfilectx
2281 2276
2282 2277 def memfilefrompatch(patchstore):
2283 2278 """Given a patch (e.g. patchstore object) return a memfilectx
2284 2279
2285 2280 This is a convenience method for building a memctx based on a patchstore.
2286 2281 """
2287 2282 def getfilectx(repo, memctx, path):
2288 2283 data, mode, copysource = patchstore.getfile(path)
2289 2284 if data is None:
2290 2285 return None
2291 2286 islink, isexec = mode
2292 2287 return memfilectx(repo, memctx, path, data, islink=islink,
2293 2288 isexec=isexec, copysource=copysource)
2294 2289
2295 2290 return getfilectx
2296 2291
2297 2292 class memctx(committablectx):
2298 2293 """Use memctx to perform in-memory commits via localrepo.commitctx().
2299 2294
2300 2295 Revision information is supplied at initialization time, while
2301 2296 related file data is made available through a callback
2302 2297 mechanism. 'repo' is the current localrepo, 'parents' is a
2303 2298 sequence of two parent revision identifiers (pass None for every
2304 2299 missing parent), 'text' is the commit message and 'files' lists
2305 2300 names of files touched by the revision (normalized and relative to
2306 2301 repository root).
2307 2302
2308 2303 filectxfn(repo, memctx, path) is a callable receiving the
2309 2304 repository, the current memctx object and the normalized path of
2310 2305 requested file, relative to repository root. It is fired by the
2311 2306 commit function for every file in 'files', but calls order is
2312 2307 undefined. If the file is available in the revision being
2313 2308 committed (updated or added), filectxfn returns a memfilectx
2314 2309 object. If the file was removed, filectxfn returns None for recent
2315 2310 Mercurial. Moved files are represented by marking the source file
2316 2311 removed and the new file added with copy information (see
2317 2312 memfilectx).
2318 2313
2319 2314 user receives the committer name and defaults to current
2320 2315 repository username, date is the commit date in any format
2321 2316 supported by dateutil.parsedate() and defaults to current date, extra
2322 2317 is a dictionary of metadata or is left empty.
2323 2318 """
2324 2319
2325 2320 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2326 2321 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2327 2322 # this field to determine what to do in filectxfn.
2328 2323 _returnnoneformissingfiles = True
2329 2324
2330 2325 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2331 2326 date=None, extra=None, branch=None, editor=False):
2332 2327 super(memctx, self).__init__(repo, text, user, date, extra,
2333 2328 branch=branch)
2334 2329 self._rev = None
2335 2330 self._node = None
2336 2331 parents = [(p or nullid) for p in parents]
2337 2332 p1, p2 = parents
2338 2333 self._parents = [self._repo[p] for p in (p1, p2)]
2339 2334 files = sorted(set(files))
2340 2335 self._files = files
2341 2336 self.substate = {}
2342 2337
2343 2338 if isinstance(filectxfn, patch.filestore):
2344 2339 filectxfn = memfilefrompatch(filectxfn)
2345 2340 elif not callable(filectxfn):
2346 2341 # if store is not callable, wrap it in a function
2347 2342 filectxfn = memfilefromctx(filectxfn)
2348 2343
2349 2344 # memoizing increases performance for e.g. vcs convert scenarios.
2350 2345 self._filectxfn = makecachingfilectxfn(filectxfn)
2351 2346
2352 2347 if editor:
2353 2348 self._text = editor(self._repo, self, [])
2354 2349 self._repo.savecommitmessage(self._text)
2355 2350
2356 2351 def filectx(self, path, filelog=None):
2357 2352 """get a file context from the working directory
2358 2353
2359 2354 Returns None if file doesn't exist and should be removed."""
2360 2355 return self._filectxfn(self._repo, self, path)
2361 2356
2362 2357 def commit(self):
2363 2358 """commit context to the repo"""
2364 2359 return self._repo.commitctx(self)
2365 2360
2366 2361 @propertycache
2367 2362 def _manifest(self):
2368 2363 """generate a manifest based on the return values of filectxfn"""
2369 2364
2370 2365 # keep this simple for now; just worry about p1
2371 2366 pctx = self._parents[0]
2372 2367 man = pctx.manifest().copy()
2373 2368
2374 2369 for f in self._status.modified:
2375 2370 man[f] = modifiednodeid
2376 2371
2377 2372 for f in self._status.added:
2378 2373 man[f] = addednodeid
2379 2374
2380 2375 for f in self._status.removed:
2381 2376 if f in man:
2382 2377 del man[f]
2383 2378
2384 2379 return man
2385 2380
2386 2381 @propertycache
2387 2382 def _status(self):
2388 2383 """Calculate exact status from ``files`` specified at construction
2389 2384 """
2390 2385 man1 = self.p1().manifest()
2391 2386 p2 = self._parents[1]
2392 2387 # "1 < len(self._parents)" can't be used for checking
2393 2388 # existence of the 2nd parent, because "memctx._parents" is
2394 2389 # explicitly initialized to a list of length 2.
2395 2390 if p2.node() != nullid:
2396 2391 man2 = p2.manifest()
2397 2392 managing = lambda f: f in man1 or f in man2
2398 2393 else:
2399 2394 managing = lambda f: f in man1
2400 2395
2401 2396 modified, added, removed = [], [], []
2402 2397 for f in self._files:
2403 2398 if not managing(f):
2404 2399 added.append(f)
2405 2400 elif self[f]:
2406 2401 modified.append(f)
2407 2402 else:
2408 2403 removed.append(f)
2409 2404
2410 2405 return scmutil.status(modified, added, removed, [], [], [], [])
2411 2406
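The classification above is: files unknown to both parents are additions, files that still resolve to content are modifications, and the rest are removals. Restated as a standalone function over plain sets (illustrative names, not the real API):

def compute_status(files, managed, present):
    """Split `files` into (modified, added, removed) lists.

    managed: paths tracked in either parent manifest
    present: paths for which the file callback returns data
    """
    modified, added, removed = [], [], []
    for f in sorted(files):
        if f not in managed:
            added.append(f)
        elif f in present:
            modified.append(f)
        else:
            removed.append(f)
    return modified, added, removed

print(compute_status({'new', 'edited', 'gone'},
                     managed={'edited', 'gone'},
                     present={'new', 'edited'}))
# (['edited'], ['new'], ['gone'])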
2412 2407 class memfilectx(committablefilectx):
2413 2408 """memfilectx represents an in-memory file to commit.
2414 2409
2415 2410 See memctx and committablefilectx for more details.
2416 2411 """
2417 2412 def __init__(self, repo, changectx, path, data, islink=False,
2418 2413 isexec=False, copysource=None):
2419 2414 """
2420 2415 path is the normalized file path relative to repository root.
2421 2416 data is the file content as a string.
2422 2417 islink is True if the file is a symbolic link.
2423 2418 isexec is True if the file is executable.
2424 2419 copied is the source file path if current file was copied in the
2425 2420 revision being committed, or None."""
2426 2421 super(memfilectx, self).__init__(repo, path, None, changectx)
2427 2422 self._data = data
2428 2423 if islink:
2429 2424 self._flags = 'l'
2430 2425 elif isexec:
2431 2426 self._flags = 'x'
2432 2427 else:
2433 2428 self._flags = ''
2434 2429 self._copysource = copysource
2435 2430
2436 2431 def copysource(self):
2437 2432 return self._copysource
2438 2433
2439 2434 def cmp(self, fctx):
2440 2435 return self.data() != fctx.data()
2441 2436
2442 2437 def data(self):
2443 2438 return self._data
2444 2439
2445 2440 def remove(self, ignoremissing=False):
2446 2441 """wraps unlink for a repo's working directory"""
2447 2442 # need to figure out what to do here
2448 2443 del self._changectx[self._path]
2449 2444
2450 2445 def write(self, data, flags, **kwargs):
2451 2446 """wraps repo.wwrite"""
2452 2447 self._data = data
2453 2448
2454 2449
2455 2450 class metadataonlyctx(committablectx):
2456 2451 """Like memctx but it's reusing the manifest of different commit.
2457 2452 Intended to be used by lightweight operations that are creating
2458 2453 metadata-only changes.
2459 2454
2460 2455 Revision information is supplied at initialization time. 'repo' is the
2461 2456 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2462 2457 'parents' is a sequence of two parent revision identifiers (pass None for
2463 2458 every missing parent), 'text' is the commit message.
2464 2459
2465 2460 user receives the committer name and defaults to current repository
2466 2461 username, date is the commit date in any format supported by
2467 2462 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2468 2463 metadata or is left empty.
2469 2464 """
2470 2465 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2471 2466 date=None, extra=None, editor=False):
2472 2467 if text is None:
2473 2468 text = originalctx.description()
2474 2469 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2475 2470 self._rev = None
2476 2471 self._node = None
2477 2472 self._originalctx = originalctx
2478 2473 self._manifestnode = originalctx.manifestnode()
2479 2474 if parents is None:
2480 2475 parents = originalctx.parents()
2481 2476 else:
2482 2477 parents = [repo[p] for p in parents if p is not None]
2483 2478 parents = parents[:]
2484 2479 while len(parents) < 2:
2485 2480 parents.append(repo[nullid])
2486 2481 p1, p2 = self._parents = parents
2487 2482
2488 2483 # sanity check to ensure that the reused manifest parents are
2489 2484 # manifests of our commit parents
2490 2485 mp1, mp2 = self.manifestctx().parents
2491 2486 if p1 != nullid and p1.manifestnode() != mp1:
2492 2487 raise RuntimeError(r"can't reuse the manifest: its p1 "
2493 2488 r"doesn't match the new ctx p1")
2494 2489 if p2 != nullid and p2.manifestnode() != mp2:
2495 2490 raise RuntimeError(r"can't reuse the manifest: "
2496 2491 r"its p2 doesn't match the new ctx p2")
2497 2492
2498 2493 self._files = originalctx.files()
2499 2494 self.substate = {}
2500 2495
2501 2496 if editor:
2502 2497 self._text = editor(self._repo, self, [])
2503 2498 self._repo.savecommitmessage(self._text)
2504 2499
2505 2500 def manifestnode(self):
2506 2501 return self._manifestnode
2507 2502
2508 2503 @property
2509 2504 def _manifestctx(self):
2510 2505 return self._repo.manifestlog[self._manifestnode]
2511 2506
2512 2507 def filectx(self, path, filelog=None):
2513 2508 return self._originalctx.filectx(path, filelog=filelog)
2514 2509
2515 2510 def commit(self):
2516 2511 """commit context to the repo"""
2517 2512 return self._repo.commitctx(self)
2518 2513
2519 2514 @property
2520 2515 def _manifest(self):
2521 2516 return self._originalctx.manifest()
2522 2517
2523 2518 @propertycache
2524 2519 def _status(self):
2525 2520 """Calculate exact status from ``files`` specified in the ``origctx``
2526 2521 and parents manifests.
2527 2522 """
2528 2523 man1 = self.p1().manifest()
2529 2524 p2 = self._parents[1]
2530 2525 # "1 < len(self._parents)" can't be used for checking
2531 2526 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2532 2527 # explicitly initialized to a list of length 2.
2533 2528 if p2.node() != nullid:
2534 2529 man2 = p2.manifest()
2535 2530 managing = lambda f: f in man1 or f in man2
2536 2531 else:
2537 2532 managing = lambda f: f in man1
2538 2533
2539 2534 modified, added, removed = [], [], []
2540 2535 for f in self._files:
2541 2536 if not managing(f):
2542 2537 added.append(f)
2543 2538 elif f in self:
2544 2539 modified.append(f)
2545 2540 else:
2546 2541 removed.append(f)
2547 2542
2548 2543 return scmutil.status(modified, added, removed, [], [], [], [])
2549 2544
2550 2545 class arbitraryfilectx(object):
2551 2546 """Allows you to use filectx-like functions on a file in an arbitrary
2552 2547 location on disk, possibly not in the working directory.
2553 2548 """
2554 2549 def __init__(self, path, repo=None):
2555 2550 # Repo is optional because contrib/simplemerge uses this class.
2556 2551 self._repo = repo
2557 2552 self._path = path
2558 2553
2559 2554 def cmp(self, fctx):
2560 2555 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2561 2556 # path if either side is a symlink.
2562 2557 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2563 2558 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2564 2559 # Add a fast-path for merge if both sides are disk-backed.
2565 2560 # Note that filecmp uses the opposite return values (True if same)
2566 2561 # from our cmp functions (True if different).
2567 2562 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2568 2563 return self.data() != fctx.data()
2569 2564
2570 2565 def path(self):
2571 2566 return self._path
2572 2567
2573 2568 def flags(self):
2574 2569 return ''
2575 2570
2576 2571 def data(self):
2577 2572 return util.readfile(self._path)
2578 2573
2579 2574 def decodeddata(self):
2580 2575 with open(self._path, "rb") as f:
2581 2576 return f.read()
2582 2577
2583 2578 def remove(self):
2584 2579 util.unlink(self._path)
2585 2580
2586 2581 def write(self, data, flags, **kwargs):
2587 2582 assert not flags
2588 2583 with open(self._path, "wb") as f:
2589 2584 f.write(data)
@@ -1,1986 +1,1995 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import glob
12 12 import hashlib
13 13 import os
14 14 import posixpath
15 15 import re
16 16 import subprocess
17 17 import weakref
18 18
19 19 from .i18n import _
20 20 from .node import (
21 21 bin,
22 22 hex,
23 23 nullid,
24 24 nullrev,
25 25 short,
26 26 wdirid,
27 27 wdirrev,
28 28 )
29 29
30 30 from . import (
31 31 copies as copiesmod,
32 32 encoding,
33 33 error,
34 34 match as matchmod,
35 35 obsolete,
36 36 obsutil,
37 37 pathutil,
38 38 phases,
39 39 policy,
40 40 pycompat,
41 41 revsetlang,
42 42 similar,
43 43 smartset,
44 44 url,
45 45 util,
46 46 vfs,
47 47 )
48 48
49 49 from .utils import (
50 50 procutil,
51 51 stringutil,
52 52 )
53 53
54 54 if pycompat.iswindows:
55 55 from . import scmwindows as scmplatform
56 56 else:
57 57 from . import scmposix as scmplatform
58 58
59 59 parsers = policy.importmod(r'parsers')
60 60
61 61 termsize = scmplatform.termsize
62 62
63 63 class status(tuple):
64 64 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
65 65 and 'ignored' properties are only relevant to the working copy.
66 66 '''
67 67
68 68 __slots__ = ()
69 69
70 70 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
71 71 clean):
72 72 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
73 73 ignored, clean))
74 74
75 75 @property
76 76 def modified(self):
77 77 '''files that have been modified'''
78 78 return self[0]
79 79
80 80 @property
81 81 def added(self):
82 82 '''files that have been added'''
83 83 return self[1]
84 84
85 85 @property
86 86 def removed(self):
87 87 '''files that have been removed'''
88 88 return self[2]
89 89
90 90 @property
91 91 def deleted(self):
92 92 '''files that are in the dirstate, but have been deleted from the
93 93 working copy (aka "missing")
94 94 '''
95 95 return self[3]
96 96
97 97 @property
98 98 def unknown(self):
99 99 '''files not in the dirstate that are not ignored'''
100 100 return self[4]
101 101
102 102 @property
103 103 def ignored(self):
104 104 '''files not in the dirstate that are ignored (by _dirignore())'''
105 105 return self[5]
106 106
107 107 @property
108 108 def clean(self):
109 109 '''files that have not been modified'''
110 110 return self[6]
111 111
112 112 def __repr__(self, *args, **kwargs):
113 113 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
114 114 r'unknown=%s, ignored=%s, clean=%s>') %
115 115 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
116 116
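The class is the classic 'tuple with named fields' construction: a tuple subclass with empty __slots__ and one read-only property per position. A reduced sketch of the same idiom:

class minimalstatus(tuple):
    '''Two-field example of the tuple-with-properties pattern.'''
    __slots__ = ()

    def __new__(cls, modified, added):
        return tuple.__new__(cls, (modified, added))

    @property
    def modified(self):
        return self[0]

    @property
    def added(self):
        return self[1]

s = minimalstatus(['a.txt'], ['b.txt'])
print(s.modified, s.added)   # ['a.txt'] ['b.txt']
print(tuple(s))              # still unpacks and iterates like a tuple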
117 117 def itersubrepos(ctx1, ctx2):
118 118 """find subrepos in ctx1 or ctx2"""
119 119 # Create a (subpath, ctx) mapping where we prefer subpaths from
120 120 # ctx1. The subpaths from ctx2 are important when the .hgsub file
121 121 # has been modified (in ctx2) but not yet committed (in ctx1).
122 122 subpaths = dict.fromkeys(ctx2.substate, ctx2)
123 123 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
124 124
125 125 missing = set()
126 126
127 127 for subpath in ctx2.substate:
128 128 if subpath not in ctx1.substate:
129 129 del subpaths[subpath]
130 130 missing.add(subpath)
131 131
132 132 for subpath, ctx in sorted(subpaths.iteritems()):
133 133 yield subpath, ctx.sub(subpath)
134 134
135 135 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
136 136 # status and diff will have an accurate result when it does
137 137 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
138 138 # against itself.
139 139 for subpath in missing:
140 140 yield subpath, ctx2.nullsub(subpath, ctx1)
141 141
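The precedence trick above is worth spelling out: seeding the mapping from ctx2 and then updating with ctx1 means ctx1 wins wherever both sides define a subpath. In miniature:

subpaths = dict.fromkeys(['common', 'only2'], 'ctx2')
subpaths.update(dict.fromkeys(['common', 'only1'], 'ctx1'))
print(subpaths['common'])   # 'ctx1' -- the update side wins ties
print(subpaths['only2'])    # 'ctx2' -- entries unique to ctx2 survive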
142 142 def nochangesfound(ui, repo, excluded=None):
143 143 '''Report no changes for push/pull, excluded is None or a list of
144 144 nodes excluded from the push/pull.
145 145 '''
146 146 secretlist = []
147 147 if excluded:
148 148 for n in excluded:
149 149 ctx = repo[n]
150 150 if ctx.phase() >= phases.secret and not ctx.extinct():
151 151 secretlist.append(n)
152 152
153 153 if secretlist:
154 154 ui.status(_("no changes found (ignored %d secret changesets)\n")
155 155 % len(secretlist))
156 156 else:
157 157 ui.status(_("no changes found\n"))
158 158
159 159 def callcatch(ui, func):
160 160 """call func() with global exception handling
161 161
162 162 return func() if no exception happens. otherwise do some error handling
163 163 and return an exit code accordingly. does not handle all exceptions.
164 164 """
165 165 try:
166 166 try:
167 167 return func()
168 168 except: # re-raises
169 169 ui.traceback()
170 170 raise
171 171 # Global exception handling, alphabetically
172 172 # Mercurial-specific first, followed by built-in and library exceptions
173 173 except error.LockHeld as inst:
174 174 if inst.errno == errno.ETIMEDOUT:
175 175 reason = _('timed out waiting for lock held by %r') % (
176 176 pycompat.bytestr(inst.locker))
177 177 else:
178 178 reason = _('lock held by %r') % inst.locker
179 179 ui.error(_("abort: %s: %s\n") % (
180 180 inst.desc or stringutil.forcebytestr(inst.filename), reason))
181 181 if not inst.locker:
182 182 ui.error(_("(lock might be very busy)\n"))
183 183 except error.LockUnavailable as inst:
184 184 ui.error(_("abort: could not lock %s: %s\n") %
185 185 (inst.desc or stringutil.forcebytestr(inst.filename),
186 186 encoding.strtolocal(inst.strerror)))
187 187 except error.OutOfBandError as inst:
188 188 if inst.args:
189 189 msg = _("abort: remote error:\n")
190 190 else:
191 191 msg = _("abort: remote error\n")
192 192 ui.error(msg)
193 193 if inst.args:
194 194 ui.error(''.join(inst.args))
195 195 if inst.hint:
196 196 ui.error('(%s)\n' % inst.hint)
197 197 except error.RepoError as inst:
198 198 ui.error(_("abort: %s!\n") % inst)
199 199 if inst.hint:
200 200 ui.error(_("(%s)\n") % inst.hint)
201 201 except error.ResponseError as inst:
202 202 ui.error(_("abort: %s") % inst.args[0])
203 203 msg = inst.args[1]
204 204 if isinstance(msg, type(u'')):
205 205 msg = pycompat.sysbytes(msg)
206 206 if not isinstance(msg, bytes):
207 207 ui.error(" %r\n" % (msg,))
208 208 elif not msg:
209 209 ui.error(_(" empty string\n"))
210 210 else:
211 211 ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
212 212 except error.CensoredNodeError as inst:
213 213 ui.error(_("abort: file censored %s!\n") % inst)
214 214 except error.StorageError as inst:
215 215 ui.error(_("abort: %s!\n") % inst)
216 216 if inst.hint:
217 217 ui.error(_("(%s)\n") % inst.hint)
218 218 except error.InterventionRequired as inst:
219 219 ui.error("%s\n" % inst)
220 220 if inst.hint:
221 221 ui.error(_("(%s)\n") % inst.hint)
222 222 return 1
223 223 except error.WdirUnsupported:
224 224 ui.error(_("abort: working directory revision cannot be specified\n"))
225 225 except error.Abort as inst:
226 226 ui.error(_("abort: %s\n") % inst)
227 227 if inst.hint:
228 228 ui.error(_("(%s)\n") % inst.hint)
229 229 except ImportError as inst:
230 230 ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
231 231 m = stringutil.forcebytestr(inst).split()[-1]
232 232 if m in "mpatch bdiff".split():
233 233 ui.error(_("(did you forget to compile extensions?)\n"))
234 234 elif m in "zlib".split():
235 235 ui.error(_("(is your Python install correct?)\n"))
236 236 except (IOError, OSError) as inst:
237 237 if util.safehasattr(inst, "code"): # HTTPError
238 238 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
239 239 elif util.safehasattr(inst, "reason"): # URLError or SSLError
240 240 try: # usually it is in the form (errno, strerror)
241 241 reason = inst.reason.args[1]
242 242 except (AttributeError, IndexError):
243 243 # it might be anything, for example a string
244 244 reason = inst.reason
245 245 if isinstance(reason, pycompat.unicode):
246 246 # SSLError of Python 2.7.9 contains a unicode
247 247 reason = encoding.unitolocal(reason)
248 248 ui.error(_("abort: error: %s\n") % reason)
249 249 elif (util.safehasattr(inst, "args")
250 250 and inst.args and inst.args[0] == errno.EPIPE):
251 251 pass
252 252 elif getattr(inst, "strerror", None): # common IOError or OSError
253 253 if getattr(inst, "filename", None) is not None:
254 254 ui.error(_("abort: %s: '%s'\n") % (
255 255 encoding.strtolocal(inst.strerror),
256 256 stringutil.forcebytestr(inst.filename)))
257 257 else:
258 258 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
259 259 else: # suspicious IOError
260 260 raise
261 261 except MemoryError:
262 262 ui.error(_("abort: out of memory\n"))
263 263 except SystemExit as inst:
264 264 # Commands shouldn't sys.exit directly, but give a return code.
265 265 # Just in case, catch this and pass the exit code to the caller.
266 266 return inst.code
267 267
268 268 return -1
269 269
270 270 def checknewlabel(repo, lbl, kind):
271 271 # Do not use the "kind" parameter in ui output.
272 272 # It makes strings difficult to translate.
273 273 if lbl in ['tip', '.', 'null']:
274 274 raise error.Abort(_("the name '%s' is reserved") % lbl)
275 275 for c in (':', '\0', '\n', '\r'):
276 276 if c in lbl:
277 277 raise error.Abort(
278 278 _("%r cannot be used in a name") % pycompat.bytestr(c))
279 279 try:
280 280 int(lbl)
281 281 raise error.Abort(_("cannot use an integer as a name"))
282 282 except ValueError:
283 283 pass
284 284 if lbl.strip() != lbl:
285 285 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
286 286
287 287 def checkfilename(f):
288 288 '''Check that the filename f is an acceptable filename for a tracked file'''
289 289 if '\r' in f or '\n' in f:
290 290 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
291 291 % pycompat.bytestr(f))
292 292
293 293 def checkportable(ui, f):
294 294 '''Check if filename f is portable and warn or abort depending on config'''
295 295 checkfilename(f)
296 296 abort, warn = checkportabilityalert(ui)
297 297 if abort or warn:
298 298 msg = util.checkwinfilename(f)
299 299 if msg:
300 300 msg = "%s: %s" % (msg, procutil.shellquote(f))
301 301 if abort:
302 302 raise error.Abort(msg)
303 303 ui.warn(_("warning: %s\n") % msg)
304 304
305 305 def checkportabilityalert(ui):
306 306 '''check if the user's config requests nothing, a warning, or abort for
307 307 non-portable filenames'''
308 308 val = ui.config('ui', 'portablefilenames')
309 309 lval = val.lower()
310 310 bval = stringutil.parsebool(val)
311 311 abort = pycompat.iswindows or lval == 'abort'
312 312 warn = bval or lval == 'warn'
313 313 if bval is None and not (warn or abort or lval == 'ignore'):
314 314 raise error.ConfigError(
315 315 _("ui.portablefilenames value is invalid ('%s')") % val)
316 316 return abort, warn
317 317
318 318 class casecollisionauditor(object):
319 319 def __init__(self, ui, abort, dirstate):
320 320 self._ui = ui
321 321 self._abort = abort
322 322 allfiles = '\0'.join(dirstate._map)
323 323 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
324 324 self._dirstate = dirstate
325 325 # The purpose of _newfiles is so that we don't complain about
326 326 # case collisions if someone were to call this object with the
327 327 # same filename twice.
328 328 self._newfiles = set()
329 329
330 330 def __call__(self, f):
331 331 if f in self._newfiles:
332 332 return
333 333 fl = encoding.lower(f)
334 334 if fl in self._loweredfiles and f not in self._dirstate:
335 335 msg = _('possible case-folding collision for %s') % f
336 336 if self._abort:
337 337 raise error.Abort(msg)
338 338 self._ui.warn(_("warning: %s\n") % msg)
339 339 self._loweredfiles.add(fl)
340 340 self._newfiles.add(f)
341 341
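The auditor's core state is one set of case-folded names plus one set of names already blessed during this run. A standalone sketch of the check, with str.lower() standing in for encoding.lower() (illustrative names):

def find_case_collisions(tracked, new_files):
    """Yield new files that collide case-insensitively with tracked names."""
    lowered = {f.lower() for f in tracked}
    seen = set()
    for f in new_files:
        fl = f.lower()
        if fl in lowered and f not in tracked and f not in seen:
            yield f
        lowered.add(fl)
        seen.add(f)

print(list(find_case_collisions({'README'}, ['readme', 'new.txt'])))
# ['readme'] -- collides with 'README' on a case-folding filesystem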
342 342 def filteredhash(repo, maxrev):
343 343 """build hash of filtered revisions in the current repoview.
344 344
345 345 Multiple caches perform up-to-date validation by checking that the
346 346 tiprev and tipnode stored in the cache file match the current repository.
347 347 However, this is not sufficient for validating repoviews because the set
348 348 of revisions in the view may change without the repository tiprev and
349 349 tipnode changing.
350 350
351 351 This function hashes all the revs filtered from the view and returns
352 352 that SHA-1 digest.
353 353 """
354 354 cl = repo.changelog
355 355 if not cl.filteredrevs:
356 356 return None
357 357 key = None
358 358 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
359 359 if revs:
360 360 s = hashlib.sha1()
361 361 for rev in revs:
362 362 s.update('%d;' % rev)
363 363 key = s.digest()
364 364 return key
365 365
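The cache key is nothing more than a SHA-1 over the sorted filtered revisions, each serialized as '%d;'. The equivalent standalone computation (hypothetical revision numbers):

import hashlib

def filteredrevskey(filteredrevs, maxrev):
    revs = sorted(r for r in filteredrevs if r <= maxrev)
    if not revs:
        return None
    s = hashlib.sha1()
    for rev in revs:
        s.update(b'%d;' % rev)
    return s.digest()

print(filteredrevskey({3, 7, 12}, maxrev=10).hex())  # digest over revs 3 and 7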
366 366 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
367 367 '''yield every hg repository under path, always recursively.
368 368 The recurse flag will only control recursion into repo working dirs'''
369 369 def errhandler(err):
370 370 if err.filename == path:
371 371 raise err
372 372 samestat = getattr(os.path, 'samestat', None)
373 373 if followsym and samestat is not None:
374 374 def adddir(dirlst, dirname):
375 375 dirstat = os.stat(dirname)
376 376 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
377 377 if not match:
378 378 dirlst.append(dirstat)
379 379 return not match
380 380 else:
381 381 followsym = False
382 382
383 383 if (seen_dirs is None) and followsym:
384 384 seen_dirs = []
385 385 adddir(seen_dirs, path)
386 386 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
387 387 dirs.sort()
388 388 if '.hg' in dirs:
389 389 yield root # found a repository
390 390 qroot = os.path.join(root, '.hg', 'patches')
391 391 if os.path.isdir(os.path.join(qroot, '.hg')):
392 392 yield qroot # we have a patch queue repo here
393 393 if recurse:
394 394 # avoid recursing inside the .hg directory
395 395 dirs.remove('.hg')
396 396 else:
397 397 dirs[:] = [] # don't descend further
398 398 elif followsym:
399 399 newdirs = []
400 400 for d in dirs:
401 401 fname = os.path.join(root, d)
402 402 if adddir(seen_dirs, fname):
403 403 if os.path.islink(fname):
404 404 for hgname in walkrepos(fname, True, seen_dirs):
405 405 yield hgname
406 406 else:
407 407 newdirs.append(d)
408 408 dirs[:] = newdirs
409 409
410 410 def binnode(ctx):
411 411 """Return binary node id for a given basectx"""
412 412 node = ctx.node()
413 413 if node is None:
414 414 return wdirid
415 415 return node
416 416
417 417 def intrev(ctx):
418 418 """Return integer for a given basectx that can be used in comparison or
419 419 arithmetic operation"""
420 420 rev = ctx.rev()
421 421 if rev is None:
422 422 return wdirrev
423 423 return rev
424 424
425 425 def formatchangeid(ctx):
426 426 """Format changectx as '{rev}:{node|formatnode}', which is the default
427 427 template provided by logcmdutil.changesettemplater"""
428 428 repo = ctx.repo()
429 429 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
430 430
431 431 def formatrevnode(ui, rev, node):
432 432 """Format given revision and node depending on the current verbosity"""
433 433 if ui.debugflag:
434 434 hexfunc = hex
435 435 else:
436 436 hexfunc = short
437 437 return '%d:%s' % (rev, hexfunc(node))
438 438
439 439 def resolvehexnodeidprefix(repo, prefix):
440 440 if (prefix.startswith('x') and
441 441 repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
442 442 prefix = prefix[1:]
443 443 try:
444 444 # Uses unfiltered repo because it's faster when prefix is ambiguous.
445 445 # This matches the shortesthexnodeidprefix() function below.
446 446 node = repo.unfiltered().changelog._partialmatch(prefix)
447 447 except error.AmbiguousPrefixLookupError:
448 448 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
449 449 if revset:
450 450 # Clear config to avoid infinite recursion
451 451 configoverrides = {('experimental',
452 452 'revisions.disambiguatewithin'): None}
453 453 with repo.ui.configoverride(configoverrides):
454 454 revs = repo.anyrevs([revset], user=True)
455 455 matches = []
456 456 for rev in revs:
457 457 node = repo.changelog.node(rev)
458 458 if hex(node).startswith(prefix):
459 459 matches.append(node)
460 460 if len(matches) == 1:
461 461 return matches[0]
462 462 raise
463 463 if node is None:
464 464 return
465 465 repo.changelog.rev(node) # make sure node isn't filtered
466 466 return node
467 467
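Stripped of the revset-based disambiguation and repo filtering, prefix resolution is 'find the unique node whose hex form starts with the prefix'. A toy version over a list of hex ids (the real lookup goes through the changelog index):

def partialmatch(hexnodes, prefix):
    matches = [n for n in hexnodes if n.startswith(prefix)]
    if not matches:
        return None                  # unknown prefix
    if len(matches) > 1:
        raise LookupError('ambiguous prefix %r' % prefix)
    return matches[0]

nodes = ['d1b4c12', 'd1f0aaa', 'e3c9b00']
print(partialmatch(nodes, 'e3'))     # 'e3c9b00'
print(partialmatch(nodes, 'xyz'))    # None
# partialmatch(nodes, 'd1') raises LookupError: ambiguous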
468 468 def mayberevnum(repo, prefix):
469 469 """Checks if the given prefix may be mistaken for a revision number"""
470 470 try:
471 471 i = int(prefix)
472 472 # if we are a pure int, then starting with zero will not be
473 473 # confused as a rev; or, obviously, if the int is larger
474 474 # than the value of the tip rev. We still need to disambiguate if
475 475 # prefix == '0', since that *is* a valid revnum.
476 476 if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
477 477 return False
478 478 return True
479 479 except ValueError:
480 480 return False
481 481
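The rule: a prefix can shadow a revision number only if it parses as an integer, has no leading zero (except '0' itself, which is a valid revnum), and does not exceed the tip. As a standalone predicate, with tiprev replacing len(repo) (illustrative):

def looks_like_revnum(prefix, tiprev):
    try:
        i = int(prefix)
    except ValueError:
        return False
    if prefix != b'0' and prefix[0:1] == b'0':
        return False                # leading zero is never a printed revnum
    return i <= tiprev

print(looks_like_revnum(b'42', tiprev=100))    # True  -- could mean rev 42
print(looks_like_revnum(b'042', tiprev=100))   # False -- leading zero
print(looks_like_revnum(b'999', tiprev=100))   # False -- beyond the tip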
482 482 def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
483 483 """Find the shortest unambiguous prefix that matches hexnode.
484 484
485 485 If "cache" is not None, it must be a dictionary that can be used for
486 486 caching between calls to this method.
487 487 """
488 488 # _partialmatch() of filtered changelog could take O(len(repo)) time,
489 489 # which would be unacceptably slow. So we look for hash collisions in
490 490 # unfiltered space, which means some hashes may be slightly longer.
491 491
492 492 minlength = max(minlength, 1)
493 493
494 494 def disambiguate(prefix):
495 495 """Disambiguate against revnums."""
496 496 if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
497 497 if mayberevnum(repo, prefix):
498 498 return 'x' + prefix
499 499 else:
500 500 return prefix
501 501
502 502 hexnode = hex(node)
503 503 for length in range(len(prefix), len(hexnode) + 1):
504 504 prefix = hexnode[:length]
505 505 if not mayberevnum(repo, prefix):
506 506 return prefix
507 507
508 508 cl = repo.unfiltered().changelog
509 509 revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
510 510 if revset:
511 511 revs = None
512 512 if cache is not None:
513 513 revs = cache.get('disambiguationrevset')
514 514 if revs is None:
515 515 revs = repo.anyrevs([revset], user=True)
516 516 if cache is not None:
517 517 cache['disambiguationrevset'] = revs
518 518 if cl.rev(node) in revs:
519 519 hexnode = hex(node)
520 520 nodetree = None
521 521 if cache is not None:
522 522 nodetree = cache.get('disambiguationnodetree')
523 523 if not nodetree:
524 524 try:
525 525 nodetree = parsers.nodetree(cl.index, len(revs))
526 526 except AttributeError:
527 527 # no native nodetree
528 528 pass
529 529 else:
530 530 for r in revs:
531 531 nodetree.insert(r)
532 532 if cache is not None:
533 533 cache['disambiguationnodetree'] = nodetree
534 534 if nodetree is not None:
535 535 length = max(nodetree.shortest(node), minlength)
536 536 prefix = hexnode[:length]
537 537 return disambiguate(prefix)
538 538 for length in range(minlength, len(hexnode) + 1):
539 539 matches = []
540 540 prefix = hexnode[:length]
541 541 for rev in revs:
542 542 otherhexnode = repo[rev].hex()
543 543 if prefix == otherhexnode[:length]:
544 544 matches.append(otherhexnode)
545 545 if len(matches) == 1:
546 546 return disambiguate(prefix)
547 547
548 548 try:
549 549 return disambiguate(cl.shortest(node, minlength))
550 550 except error.LookupError:
551 551 raise error.RepoLookupError()
552 552
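# Hedged usage sketch (an addition): callers needing many short prefixes can
# share one cache dict across calls so the disambiguation revset or nodetree
# is only built once. 'somerepo' and 'nodes' are hypothetical.
def _shortprefixes(somerepo, nodes):
    cache = {}
    return [shortesthexnodeidprefix(somerepo, n, minlength=4, cache=cache)
            for n in nodes]
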
553 553 def isrevsymbol(repo, symbol):
554 554 """Checks if a symbol exists in the repo.
555 555
556 556 See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
557 557 symbol is an ambiguous nodeid prefix.
558 558 """
559 559 try:
560 560 revsymbol(repo, symbol)
561 561 return True
562 562 except error.RepoLookupError:
563 563 return False
564 564
565 565 def revsymbol(repo, symbol):
566 566 """Returns a context given a single revision symbol (as string).
567 567
568 568 This is similar to revsingle(), but accepts only a single revision symbol,
569 569 i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
570 570 not "max(public())".
571 571 """
572 572 if not isinstance(symbol, bytes):
573 573 msg = ("symbol (%s of type %s) was not a string, did you mean "
574 574 "repo[symbol]?" % (symbol, type(symbol)))
575 575 raise error.ProgrammingError(msg)
576 576 try:
577 577 if symbol in ('.', 'tip', 'null'):
578 578 return repo[symbol]
579 579
580 580 try:
581 581 r = int(symbol)
582 582 if '%d' % r != symbol:
583 583 raise ValueError
584 584 l = len(repo.changelog)
585 585 if r < 0:
586 586 r += l
587 587 if r < 0 or r >= l and r != wdirrev:
588 588 raise ValueError
589 589 return repo[r]
590 590 except error.FilteredIndexError:
591 591 raise
592 592 except (ValueError, OverflowError, IndexError):
593 593 pass
594 594
595 595 if len(symbol) == 40:
596 596 try:
597 597 node = bin(symbol)
598 598 rev = repo.changelog.rev(node)
599 599 return repo[rev]
600 600 except error.FilteredLookupError:
601 601 raise
602 602 except (TypeError, LookupError):
603 603 pass
604 604
605 605 # look up bookmarks through the name interface
606 606 try:
607 607 node = repo.names.singlenode(repo, symbol)
608 608 rev = repo.changelog.rev(node)
609 609 return repo[rev]
610 610 except KeyError:
611 611 pass
612 612
613 613 node = resolvehexnodeidprefix(repo, symbol)
614 614 if node is not None:
615 615 rev = repo.changelog.rev(node)
616 616 return repo[rev]
617 617
618 618 raise error.RepoLookupError(_("unknown revision '%s'") % symbol)
619 619
620 620 except error.WdirUnsupported:
621 621 return repo[None]
622 622 except (error.FilteredIndexError, error.FilteredLookupError,
623 623 error.FilteredRepoLookupError):
624 624 raise _filterederror(repo, symbol)
625 625
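# Illustrative sketch (an addition): typical revsymbol() inputs. Each call
# returns a changectx or raises RepoLookupError; full revset expressions
# such as 'max(public())' belong to revrange()/revsingle() instead.
def _revsymbolexamples(somerepo):
    wdirparent = revsymbol(somerepo, b'.')       # dirstate parent
    tip = revsymbol(somerepo, b'tip')            # tip-most revision
    byprefix = revsymbol(somerepo, b'deadbeef')  # hex nodeid prefix (if any)
    return wdirparent, tip, byprefix
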
626 626 def _filterederror(repo, changeid):
627 627 """build an exception to be raised about a filtered changeid
628 628
629 629 This is extracted into a function to help extensions (e.g. evolve)
630 630 experiment with various message variants."""
631 631 if repo.filtername.startswith('visible'):
632 632
633 633 # Check if the changeset is obsolete
634 634 unfilteredrepo = repo.unfiltered()
635 635 ctx = revsymbol(unfilteredrepo, changeid)
636 636
637 637 # If the changeset is obsolete, enrich the message with the reason
638 638 # that made this changeset not visible
639 639 if ctx.obsolete():
640 640 msg = obsutil._getfilteredreason(repo, changeid, ctx)
641 641 else:
642 642 msg = _("hidden revision '%s'") % changeid
643 643
644 644 hint = _('use --hidden to access hidden revisions')
645 645
646 646 return error.FilteredRepoLookupError(msg, hint=hint)
647 647 msg = _("filtered revision '%s' (not in '%s' subset)")
648 648 msg %= (changeid, repo.filtername)
649 649 return error.FilteredRepoLookupError(msg)
650 650
651 651 def revsingle(repo, revspec, default='.', localalias=None):
652 652 if not revspec and revspec != 0:
653 653 return repo[default]
654 654
655 655 l = revrange(repo, [revspec], localalias=localalias)
656 656 if not l:
657 657 raise error.Abort(_('empty revision set'))
658 658 return repo[l.last()]
659 659
660 660 def _pairspec(revspec):
661 661 tree = revsetlang.parse(revspec)
662 662 return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
663 663
664 664 def revpair(repo, revs):
665 665 if not revs:
666 666 return repo['.'], repo[None]
667 667
668 668 l = revrange(repo, revs)
669 669
670 670 if not l:
671 671 raise error.Abort(_('empty revision range'))
672 672
673 673 first = l.first()
674 674 second = l.last()
675 675
676 676 if (first == second and len(revs) >= 2
677 677 and not all(revrange(repo, [r]) for r in revs)):
678 678 raise error.Abort(_('empty revision on one side of range'))
679 679
680 680 # if top-level is range expression, the result must always be a pair
681 681 if first == second and len(revs) == 1 and not _pairspec(revs[0]):
682 682 return repo[first], repo[None]
683 683
684 684 return repo[first], repo[second]
685 685
686 686 def revrange(repo, specs, localalias=None):
687 687 """Execute 1 to many revsets and return the union.
688 688
689 689 This is the preferred mechanism for executing revsets using user-specified
690 690 config options, such as revset aliases.
691 691
692 692 The revsets specified by ``specs`` will be executed via a chained ``OR``
693 693 expression. If ``specs`` is empty, an empty result is returned.
694 694
695 695 ``specs`` can contain integers, in which case they are assumed to be
696 696 revision numbers.
697 697
698 698 It is assumed the revsets are already formatted. If you have arguments
699 699 that need to be expanded in the revset, call ``revsetlang.formatspec()``
700 700 and pass the result as an element of ``specs``.
701 701
702 702 Specifying a single revset is allowed.
703 703
704 704 Returns a ``revset.abstractsmartset`` which is a list-like interface over
705 705 integer revisions.
706 706 """
707 707 allspecs = []
708 708 for spec in specs:
709 709 if isinstance(spec, int):
710 710 spec = revsetlang.formatspec('%d', spec)
711 711 allspecs.append(spec)
712 712 return repo.anyrevs(allspecs, user=True, localalias=localalias)
713 713
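# Hedged example (an addition): mixing a preformatted revset with a bare
# integer revision number, as the docstring above allows. 'somerepo' is
# hypothetical.
def _revrangeexample(somerepo):
    spec = revsetlang.formatspec('branch(%s)', b'default')
    revs = revrange(somerepo, [spec, 0])  # union of branch(default) and rev 0
    return list(revs)
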
714 714 def meaningfulparents(repo, ctx):
715 715 """Return list of meaningful (or all if debug) parentrevs for rev.
716 716
717 717 For merges (two non-nullrev revisions) both parents are meaningful.
718 718 Otherwise the first parent revision is considered meaningful if it
719 719 is not the preceding revision.
720 720 """
721 721 parents = ctx.parents()
722 722 if len(parents) > 1:
723 723 return parents
724 724 if repo.ui.debugflag:
725 725 return [parents[0], repo[nullrev]]
726 726 if parents[0].rev() >= intrev(ctx) - 1:
727 727 return []
728 728 return parents
729 729
730 730 def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
731 731 """Return a function that produced paths for presenting to the user.
732 732
733 733 The returned function takes a repo-relative path and produces a path
734 734 that can be presented in the UI.
735 735
736 736 Depending on the value of ui.relative-paths, either a repo-relative or
737 737 cwd-relative path will be produced.
738 738
739 739 legacyrelativevalue is the value to use if ui.relative-paths=legacy
740 740
741 741 If forcerelativevalue is not None, then that value will be used regardless
742 742 of what ui.relative-paths is set to.
743 743 """
744 744 if forcerelativevalue is not None:
745 745 relative = forcerelativevalue
746 746 else:
747 747 config = repo.ui.config('ui', 'relative-paths')
748 748 if config == 'legacy':
749 749 relative = legacyrelativevalue
750 750 else:
751 751 relative = stringutil.parsebool(config)
752 752 if relative is None:
753 753 raise error.ConfigError(
754 754 _("ui.relative-paths is not a boolean ('%s')") % config)
755 755
756 756 if relative:
757 757 cwd = repo.getcwd()
758 758 pathto = repo.pathto
759 759 return lambda f: pathto(f, cwd)
760 760 elif repo.ui.configbool('ui', 'slash'):
761 761 return lambda f: f
762 762 else:
763 763 return util.localpath
764 764
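# Illustrative sketch (an addition): presenting a repo-relative path to the
# user while honoring ui.relative-paths. 'somerepo' and 'f' are hypothetical.
def _printpath(somerepo, f):
    uipathfn = getuipathfn(somerepo, legacyrelativevalue=True)
    somerepo.ui.status("processing %s\n" % uipathfn(f))
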
765 765 def subdiruipathfn(subpath, uipathfn):
766 766 '''Create a new uipathfn that treats the file as relative to subpath.'''
767 767 return lambda f: uipathfn(posixpath.join(subpath, f))
768 768
769 769 def anypats(pats, opts):
770 770 '''Checks if any patterns, including --include and --exclude, were given.
771 771
772 772 Some commands (e.g. addremove) use this condition for deciding whether to
773 773 print absolute or relative paths.
774 774 '''
775 775 return bool(pats or opts.get('include') or opts.get('exclude'))
776 776
777 777 def expandpats(pats):
778 778 '''Expand bare globs when running on windows.
779 779 On posix we assume it has already been done by sh.'''
780 780 if not util.expandglobs:
781 781 return list(pats)
782 782 ret = []
783 783 for kindpat in pats:
784 784 kind, pat = matchmod._patsplit(kindpat, None)
785 785 if kind is None:
786 786 try:
787 787 globbed = glob.glob(pat)
788 788 except re.error:
789 789 globbed = [pat]
790 790 if globbed:
791 791 ret.extend(globbed)
792 792 continue
793 793 ret.append(kindpat)
794 794 return ret
795 795
796 796 def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
797 797 badfn=None):
798 798 '''Return a matcher and the patterns that were used.
799 799 The matcher will warn about bad matches, unless an alternate badfn callback
800 800 is provided.'''
801 801 if opts is None:
802 802 opts = {}
803 803 if not globbed and default == 'relpath':
804 804 pats = expandpats(pats or [])
805 805
806 806 uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
807 807 def bad(f, msg):
808 808 ctx.repo().ui.warn("%s: %s\n" % (uipathfn(f), msg))
809 809
810 810 if badfn is None:
811 811 badfn = bad
812 812
813 813 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
814 814 default, listsubrepos=opts.get('subrepos'), badfn=badfn)
815 815
816 816 if m.always():
817 817 pats = []
818 818 return m, pats
819 819
820 820 def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
821 821 badfn=None):
822 822 '''Return a matcher that will warn about bad matches.'''
823 823 return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]
824 824
825 825 def matchall(repo):
826 826 '''Return a matcher that will efficiently match everything.'''
827 827 return matchmod.always()
828 828
829 829 def matchfiles(repo, files, badfn=None):
830 830 '''Return a matcher that will efficiently match exactly these files.'''
831 831 return matchmod.exact(files, badfn=badfn)
832 832
833 833 def parsefollowlinespattern(repo, rev, pat, msg):
834 834 """Return a file name from `pat` pattern suitable for usage in followlines
835 835 logic.
836 836 """
837 837 if not matchmod.patkind(pat):
838 838 return pathutil.canonpath(repo.root, repo.getcwd(), pat)
839 839 else:
840 840 ctx = repo[rev]
841 841 m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
842 842 files = [f for f in ctx if m(f)]
843 843 if len(files) != 1:
844 844 raise error.ParseError(msg)
845 845 return files[0]
846 846
847 847 def getorigvfs(ui, repo):
848 848 """return a vfs suitable to save 'orig' file
849 849
850 850 return None if no special directory is configured"""
851 851 origbackuppath = ui.config('ui', 'origbackuppath')
852 852 if not origbackuppath:
853 853 return None
854 854 return vfs.vfs(repo.wvfs.join(origbackuppath))
855 855
856 856 def backuppath(ui, repo, filepath):
857 857 '''customize where working copy backup files (.orig files) are created
858 858
859 859 Fetch user defined path from config file: [ui] origbackuppath = <path>
860 860 Fall back to default (filepath with .orig suffix) if not specified
861 861
862 862 filepath is repo-relative
863 863
864 864 Returns an absolute path
865 865 '''
866 866 origvfs = getorigvfs(ui, repo)
867 867 if origvfs is None:
868 868 return repo.wjoin(filepath + ".orig")
869 869
870 870 origbackupdir = origvfs.dirname(filepath)
871 871 if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
872 872 ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))
873 873
874 874 # Remove any files that conflict with the backup file's path
875 875 for f in reversed(list(util.finddirs(filepath))):
876 876 if origvfs.isfileorlink(f):
877 877 ui.note(_('removing conflicting file: %s\n')
878 878 % origvfs.join(f))
879 879 origvfs.unlink(f)
880 880 break
881 881
882 882 origvfs.makedirs(origbackupdir)
883 883
884 884 if origvfs.isdir(filepath) and not origvfs.islink(filepath):
885 885 ui.note(_('removing conflicting directory: %s\n')
886 886 % origvfs.join(filepath))
887 887 origvfs.rmtree(filepath, forcibly=True)
888 888
889 889 return origvfs.join(filepath)
890 890
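# Hedged usage sketch (an addition): resolving where the .orig backup of a
# working-copy file would land before overwriting it. With no
# ui.origbackuppath configured this is simply filepath + '.orig' inside the
# working directory; otherwise it lives under the configured directory.
def _origlocation(ui, somerepo, filepath):
    return backuppath(ui, somerepo, filepath)
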
891 891 class _containsnode(object):
892 892 """proxy __contains__(node) to container.__contains__ which accepts revs"""
893 893
894 894 def __init__(self, repo, revcontainer):
895 895 self._torev = repo.changelog.rev
896 896 self._revcontains = revcontainer.__contains__
897 897
898 898 def __contains__(self, node):
899 899 return self._revcontains(self._torev(node))
900 900
901 901 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
902 902 fixphase=False, targetphase=None, backup=True):
903 903 """do common cleanups when old nodes are replaced by new nodes
904 904
905 905 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
906 906 (we might also want to move working directory parent in the future)
907 907
908 908 By default, bookmark moves are calculated automatically from 'replacements',
909 909 but 'moves' can be used to override that. Also, 'moves' may include
910 910 additional bookmark moves that should not have associated obsmarkers.
911 911
912 912 replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
913 913 have replacements. operation is a string, like "rebase".
914 914
915 915 metadata is a dictionary containing metadata to be stored in obsmarkers if
916 916 obsolescence is enabled.
917 917 """
918 918 assert fixphase or targetphase is None
919 919 if not replacements and not moves:
920 920 return
921 921
922 922 # translate mapping's other forms
923 923 if not util.safehasattr(replacements, 'items'):
924 924 replacements = {(n,): () for n in replacements}
925 925 else:
926 926 # upgrading non tuple "source" to tuple ones for BC
927 927 repls = {}
928 928 for key, value in replacements.items():
929 929 if not isinstance(key, tuple):
930 930 key = (key,)
931 931 repls[key] = value
932 932 replacements = repls
933 933
934 934 # Unfiltered repo is needed since nodes in replacements might be hidden.
935 935 unfi = repo.unfiltered()
936 936
937 937 # Calculate bookmark movements
938 938 if moves is None:
939 939 moves = {}
940 940 for oldnodes, newnodes in replacements.items():
941 941 for oldnode in oldnodes:
942 942 if oldnode in moves:
943 943 continue
944 944 if len(newnodes) > 1:
945 945 # usually a split, take the one with the biggest rev number
946 946 newnode = next(unfi.set('max(%ln)', newnodes)).node()
947 947 elif len(newnodes) == 0:
948 948 # move bookmark backwards
949 949 allreplaced = []
950 950 for rep in replacements:
951 951 allreplaced.extend(rep)
952 952 roots = list(unfi.set('max((::%n) - %ln)', oldnode,
953 953 allreplaced))
954 954 if roots:
955 955 newnode = roots[0].node()
956 956 else:
957 957 newnode = nullid
958 958 else:
959 959 newnode = newnodes[0]
960 960 moves[oldnode] = newnode
961 961
962 962 allnewnodes = [n for ns in replacements.values() for n in ns]
963 963 toretract = {}
964 964 toadvance = {}
965 965 if fixphase:
966 966 precursors = {}
967 967 for oldnodes, newnodes in replacements.items():
968 968 for oldnode in oldnodes:
969 969 for newnode in newnodes:
970 970 precursors.setdefault(newnode, []).append(oldnode)
971 971
972 972 allnewnodes.sort(key=lambda n: unfi[n].rev())
973 973 newphases = {}
974 974 def phase(ctx):
975 975 return newphases.get(ctx.node(), ctx.phase())
976 976 for newnode in allnewnodes:
977 977 ctx = unfi[newnode]
978 978 parentphase = max(phase(p) for p in ctx.parents())
979 979 if targetphase is None:
980 980 oldphase = max(unfi[oldnode].phase()
981 981 for oldnode in precursors[newnode])
982 982 newphase = max(oldphase, parentphase)
983 983 else:
984 984 newphase = max(targetphase, parentphase)
985 985 newphases[newnode] = newphase
986 986 if newphase > ctx.phase():
987 987 toretract.setdefault(newphase, []).append(newnode)
988 988 elif newphase < ctx.phase():
989 989 toadvance.setdefault(newphase, []).append(newnode)
990 990
991 991 with repo.transaction('cleanup') as tr:
992 992 # Move bookmarks
993 993 bmarks = repo._bookmarks
994 994 bmarkchanges = []
995 995 for oldnode, newnode in moves.items():
996 996 oldbmarks = repo.nodebookmarks(oldnode)
997 997 if not oldbmarks:
998 998 continue
999 999 from . import bookmarks # avoid import cycle
1000 1000 repo.ui.debug('moving bookmarks %r from %s to %s\n' %
1001 1001 (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1002 1002 hex(oldnode), hex(newnode)))
1003 1003 # Delete divergent bookmarks being parents of related newnodes
1004 1004 deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
1005 1005 allnewnodes, newnode, oldnode)
1006 1006 deletenodes = _containsnode(repo, deleterevs)
1007 1007 for name in oldbmarks:
1008 1008 bmarkchanges.append((name, newnode))
1009 1009 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1010 1010 bmarkchanges.append((b, None))
1011 1011
1012 1012 if bmarkchanges:
1013 1013 bmarks.applychanges(repo, tr, bmarkchanges)
1014 1014
1015 1015 for phase, nodes in toretract.items():
1016 1016 phases.retractboundary(repo, tr, phase, nodes)
1017 1017 for phase, nodes in toadvance.items():
1018 1018 phases.advanceboundary(repo, tr, phase, nodes)
1019 1019
1020 1020 mayusearchived = repo.ui.config('experimental', 'cleanup-as-archived')
1021 1021 # Obsolete or strip nodes
1022 1022 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1023 1023 # If a node is already obsoleted, and we want to obsolete it
1024 1024 # without a successor, skip that obsolete request since it's
1025 1025 # unnecessary. That's the "if s or not isobs(n)" check below.
1026 1026 # Also sort the nodes in topological order, which might be useful for
1027 1027 # some obsstore logic.
1028 1028 # NOTE: the sorting might belong to createmarkers.
1029 1029 torev = unfi.changelog.rev
1030 1030 sortfunc = lambda ns: torev(ns[0][0])
1031 1031 rels = []
1032 1032 for ns, s in sorted(replacements.items(), key=sortfunc):
1033 1033 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1034 1034 rels.append(rel)
1035 1035 if rels:
1036 1036 obsolete.createmarkers(repo, rels, operation=operation,
1037 1037 metadata=metadata)
1038 1038 elif phases.supportinternal(repo) and mayusearchived:
1039 1039 # this assumes we do not have "unstable" nodes above the cleaned ones
1040 1040 allreplaced = set()
1041 1041 for ns in replacements.keys():
1042 1042 allreplaced.update(ns)
1043 1043 if backup:
1044 1044 from . import repair # avoid import cycle
1045 1045 node = min(allreplaced, key=repo.changelog.rev)
1046 1046 repair.backupbundle(repo, allreplaced, allreplaced, node,
1047 1047 operation)
1048 1048 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1049 1049 else:
1050 1050 from . import repair # avoid import cycle
1051 1051 tostrip = list(n for ns in replacements for n in ns)
1052 1052 if tostrip:
1053 1053 repair.delayedstrip(repo.ui, repo, tostrip, operation,
1054 1054 backup=backup)
1055 1055
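# Illustrative sketch (an addition): the two accepted shapes of
# 'replacements' described in the docstring above. old1, old2 and new1 are
# hypothetical binary nodeids.
def _cleanupexamples(somerepo, old1, old2, new1):
    # mapping form: old1 was rewritten into new1, old2 was pruned
    cleanupnodes(somerepo, {old1: [new1], old2: []}, 'rewrite')
    # iterable form: plain deletions without any replacement
    cleanupnodes(somerepo, [old2], 'prune')
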
1056 1056 def addremove(repo, matcher, prefix, uipathfn, opts=None):
1057 1057 if opts is None:
1058 1058 opts = {}
1059 1059 m = matcher
1060 1060 dry_run = opts.get('dry_run')
1061 1061 try:
1062 1062 similarity = float(opts.get('similarity') or 0)
1063 1063 except ValueError:
1064 1064 raise error.Abort(_('similarity must be a number'))
1065 1065 if similarity < 0 or similarity > 100:
1066 1066 raise error.Abort(_('similarity must be between 0 and 100'))
1067 1067 similarity /= 100.0
1068 1068
1069 1069 ret = 0
1070 1070
1071 1071 wctx = repo[None]
1072 1072 for subpath in sorted(wctx.substate):
1073 1073 submatch = matchmod.subdirmatcher(subpath, m)
1074 1074 if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
1075 1075 sub = wctx.sub(subpath)
1076 1076 subprefix = repo.wvfs.reljoin(prefix, subpath)
1077 1077 subuipathfn = subdiruipathfn(subpath, uipathfn)
1078 1078 try:
1079 1079 if sub.addremove(submatch, subprefix, subuipathfn, opts):
1080 1080 ret = 1
1081 1081 except error.LookupError:
1082 1082 repo.ui.status(_("skipping missing subrepository: %s\n")
1083 1083 % uipathfn(subpath))
1084 1084
1085 1085 rejected = []
1086 1086 def badfn(f, msg):
1087 1087 if f in m.files():
1088 1088 m.bad(f, msg)
1089 1089 rejected.append(f)
1090 1090
1091 1091 badmatch = matchmod.badmatch(m, badfn)
1092 1092 added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
1093 1093 badmatch)
1094 1094
1095 1095 unknownset = set(unknown + forgotten)
1096 1096 toprint = unknownset.copy()
1097 1097 toprint.update(deleted)
1098 1098 for abs in sorted(toprint):
1099 1099 if repo.ui.verbose or not m.exact(abs):
1100 1100 if abs in unknownset:
1101 1101 status = _('adding %s\n') % uipathfn(abs)
1102 1102 label = 'ui.addremove.added'
1103 1103 else:
1104 1104 status = _('removing %s\n') % uipathfn(abs)
1105 1105 label = 'ui.addremove.removed'
1106 1106 repo.ui.status(status, label=label)
1107 1107
1108 1108 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1109 1109 similarity, uipathfn)
1110 1110
1111 1111 if not dry_run:
1112 1112 _markchanges(repo, unknown + forgotten, deleted, renames)
1113 1113
1114 1114 for f in rejected:
1115 1115 if f in m.files():
1116 1116 return 1
1117 1117 return ret
1118 1118
1119 1119 def marktouched(repo, files, similarity=0.0):
1120 1120 '''Assert that files have somehow been operated upon. The files are
1121 1121 relative to the repo root.'''
1122 1122 m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
1123 1123 rejected = []
1124 1124
1125 1125 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
1126 1126
1127 1127 if repo.ui.verbose:
1128 1128 unknownset = set(unknown + forgotten)
1129 1129 toprint = unknownset.copy()
1130 1130 toprint.update(deleted)
1131 1131 for abs in sorted(toprint):
1132 1132 if abs in unknownset:
1133 1133 status = _('adding %s\n') % abs
1134 1134 else:
1135 1135 status = _('removing %s\n') % abs
1136 1136 repo.ui.status(status)
1137 1137
1138 1138 # TODO: We should probably have the caller pass in uipathfn and apply it to
1139 1139 # the messages above too. legacyrelativevalue=True is consistent with how
1140 1140 # it used to work.
1141 1141 uipathfn = getuipathfn(repo, legacyrelativevalue=True)
1142 1142 renames = _findrenames(repo, m, added + unknown, removed + deleted,
1143 1143 similarity, uipathfn)
1144 1144
1145 1145 _markchanges(repo, unknown + forgotten, deleted, renames)
1146 1146
1147 1147 for f in rejected:
1148 1148 if f in m.files():
1149 1149 return 1
1150 1150 return 0
1151 1151
1152 1152 def _interestingfiles(repo, matcher):
1153 1153 '''Walk dirstate with matcher, looking for files that addremove would care
1154 1154 about.
1155 1155
1156 1156 This is different from dirstate.status because it doesn't care about
1157 1157 whether files are modified or clean.'''
1158 1158 added, unknown, deleted, removed, forgotten = [], [], [], [], []
1159 1159 audit_path = pathutil.pathauditor(repo.root, cached=True)
1160 1160
1161 1161 ctx = repo[None]
1162 1162 dirstate = repo.dirstate
1163 1163 matcher = repo.narrowmatch(matcher, includeexact=True)
1164 1164 walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
1165 1165 unknown=True, ignored=False, full=False)
1166 1166 for abs, st in walkresults.iteritems():
1167 1167 dstate = dirstate[abs]
1168 1168 if dstate == '?' and audit_path.check(abs):
1169 1169 unknown.append(abs)
1170 1170 elif dstate != 'r' and not st:
1171 1171 deleted.append(abs)
1172 1172 elif dstate == 'r' and st:
1173 1173 forgotten.append(abs)
1174 1174 # for finding renames
1175 1175 elif dstate == 'r' and not st:
1176 1176 removed.append(abs)
1177 1177 elif dstate == 'a':
1178 1178 added.append(abs)
1179 1179
1180 1180 return added, unknown, deleted, removed, forgotten
1181 1181
1182 1182 def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
1183 1183 '''Find renames from removed files to added ones.'''
1184 1184 renames = {}
1185 1185 if similarity > 0:
1186 1186 for old, new, score in similar.findrenames(repo, added, removed,
1187 1187 similarity):
1188 1188 if (repo.ui.verbose or not matcher.exact(old)
1189 1189 or not matcher.exact(new)):
1190 1190 repo.ui.status(_('recording removal of %s as rename to %s '
1191 1191 '(%d%% similar)\n') %
1192 1192 (uipathfn(old), uipathfn(new),
1193 1193 score * 100))
1194 1194 renames[new] = old
1195 1195 return renames
1196 1196
1197 1197 def _markchanges(repo, unknown, deleted, renames):
1198 1198 '''Marks the files in unknown as added, the files in deleted as removed,
1199 1199 and the files in renames as copied.'''
1200 1200 wctx = repo[None]
1201 1201 with repo.wlock():
1202 1202 wctx.forget(deleted)
1203 1203 wctx.add(unknown)
1204 1204 for new, old in renames.iteritems():
1205 1205 wctx.copy(old, new)
1206 1206
1207 1207 def getrenamedfn(repo, endrev=None):
1208 1208 if copiesmod.usechangesetcentricalgo(repo):
1209 1209 def getrenamed(fn, rev):
1210 1210 ctx = repo[rev]
1211 1211 p1copies = ctx.p1copies()
1212 1212 if fn in p1copies:
1213 1213 return p1copies[fn]
1214 1214 p2copies = ctx.p2copies()
1215 1215 if fn in p2copies:
1216 1216 return p2copies[fn]
1217 1217 return None
1218 1218 return getrenamed
1219 1219
1220 1220 rcache = {}
1221 1221 if endrev is None:
1222 1222 endrev = len(repo)
1223 1223
1224 1224 def getrenamed(fn, rev):
1225 1225 '''looks up all renames for a file (up to endrev) the first
1226 1226 time the file is given. It indexes on the changerev and only
1227 1227 parses the manifest if linkrev != changerev.
1228 1228 Returns rename info for fn at changerev rev.'''
1229 1229 if fn not in rcache:
1230 1230 rcache[fn] = {}
1231 1231 fl = repo.file(fn)
1232 1232 for i in fl:
1233 1233 lr = fl.linkrev(i)
1234 1234 renamed = fl.renamed(fl.node(i))
1235 1235 rcache[fn][lr] = renamed and renamed[0]
1236 1236 if lr >= endrev:
1237 1237 break
1238 1238 if rev in rcache[fn]:
1239 1239 return rcache[fn][rev]
1240 1240
1241 1241 # If linkrev != rev (i.e. rev not found in rcache) fallback to
1242 1242 # filectx logic.
1243 1243 try:
1244 1244 return repo[rev][fn].copysource()
1245 1245 except error.LookupError:
1246 1246 return None
1247 1247
1248 1248 return getrenamed
1249 1249
1250 1250 def getcopiesfn(repo, endrev=None):
1251 1251 if copiesmod.usechangesetcentricalgo(repo):
1252 1252 def copiesfn(ctx):
1253 1253 if ctx.p2copies():
1254 1254 allcopies = ctx.p1copies().copy()
1255 1255 # There should be no overlap
1256 1256 allcopies.update(ctx.p2copies())
1257 1257 return sorted(allcopies.items())
1258 1258 else:
1259 1259 return sorted(ctx.p1copies().items())
1260 1260 else:
1261 1261 getrenamed = getrenamedfn(repo, endrev)
1262 1262 def copiesfn(ctx):
1263 1263 copies = []
1264 1264 for fn in ctx.files():
1265 1265 rename = getrenamed(fn, ctx.rev())
1266 1266 if rename:
1267 1267 copies.append((fn, rename))
1268 1268 return copies
1269 1269
1270 1270 return copiesfn
1271 1271
1272 1272 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
1273 1273 """Update the dirstate to reflect the intent of copying src to dst. For
1274 1274 different reasons it might not end with dst being marked as copied from src.
1275 1275 """
1276 1276 origsrc = repo.dirstate.copied(src) or src
1277 1277 if dst == origsrc: # copying back a copy?
1278 1278 if repo.dirstate[dst] not in 'mn' and not dryrun:
1279 1279 repo.dirstate.normallookup(dst)
1280 1280 else:
1281 1281 if repo.dirstate[origsrc] == 'a' and origsrc == src:
1282 1282 if not ui.quiet:
1283 1283 ui.warn(_("%s has not been committed yet, so no copy "
1284 1284 "data will be stored for %s.\n")
1285 1285 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
1286 1286 if repo.dirstate[dst] in '?r' and not dryrun:
1287 1287 wctx.add([dst])
1288 1288 elif not dryrun:
1289 1289 wctx.copy(origsrc, dst)
1290 1290
1291 1291 def movedirstate(repo, newctx, match=None):
1292 1292 """Move the dirstate to newctx and adjust it as necessary.
1293 1293
1294 1294 A matcher can be provided as an optimization. It is probably a bug to pass
1295 1295 a matcher that doesn't match all the differences between the parent of the
1296 1296 working copy and newctx.
1297 1297 """
1298 1298 oldctx = repo['.']
1299 1299 ds = repo.dirstate
1300 1300 ds.setparents(newctx.node(), nullid)
1301 1301 copies = dict(ds.copies())
1302 1302 s = newctx.status(oldctx, match=match)
1303 1303 for f in s.modified:
1304 1304 if ds[f] == 'r':
1305 1305 # modified + removed -> removed
1306 1306 continue
1307 1307 ds.normallookup(f)
1308 1308
1309 1309 for f in s.added:
1310 1310 if ds[f] == 'r':
1311 1311 # added + removed -> unknown
1312 1312 ds.drop(f)
1313 1313 elif ds[f] != 'a':
1314 1314 ds.add(f)
1315 1315
1316 1316 for f in s.removed:
1317 1317 if ds[f] == 'a':
1318 1318 # removed + added -> normal
1319 1319 ds.normallookup(f)
1320 1320 elif ds[f] != 'r':
1321 1321 ds.remove(f)
1322 1322
1323 1323 # Merge old parent and old working dir copies
1324 1324 oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
1325 1325 oldcopies.update(copies)
1326 1326 copies = dict((dst, oldcopies.get(src, src))
1327 1327 for dst, src in oldcopies.iteritems())
1328 1328 # Adjust the dirstate copies
1329 1329 for dst, src in copies.iteritems():
1330 1330 if (src not in newctx or dst in newctx or ds[dst] != 'a'):
1331 1331 src = None
1332 1332 ds.copy(src, dst)
1333 1333
1334 1334 def writerequires(opener, requirements):
1335 1335 with opener('requires', 'w', atomictemp=True) as fp:
1336 1336 for r in sorted(requirements):
1337 1337 fp.write("%s\n" % r)
1338 1338
1339 1339 class filecachesubentry(object):
1340 1340 def __init__(self, path, stat):
1341 1341 self.path = path
1342 1342 self.cachestat = None
1343 1343 self._cacheable = None
1344 1344
1345 1345 if stat:
1346 1346 self.cachestat = filecachesubentry.stat(self.path)
1347 1347
1348 1348 if self.cachestat:
1349 1349 self._cacheable = self.cachestat.cacheable()
1350 1350 else:
1351 1351 # None means we don't know yet
1352 1352 self._cacheable = None
1353 1353
1354 1354 def refresh(self):
1355 1355 if self.cacheable():
1356 1356 self.cachestat = filecachesubentry.stat(self.path)
1357 1357
1358 1358 def cacheable(self):
1359 1359 if self._cacheable is not None:
1360 1360 return self._cacheable
1361 1361
1362 1362 # we don't know yet, assume it is for now
1363 1363 return True
1364 1364
1365 1365 def changed(self):
1366 1366 # no point in going further if we can't cache it
1367 1367 if not self.cacheable():
1368 1368 return True
1369 1369
1370 1370 newstat = filecachesubentry.stat(self.path)
1371 1371
1372 1372 # we may not know if it's cacheable yet, check again now
1373 1373 if newstat and self._cacheable is None:
1374 1374 self._cacheable = newstat.cacheable()
1375 1375
1376 1376 # check again
1377 1377 if not self._cacheable:
1378 1378 return True
1379 1379
1380 1380 if self.cachestat != newstat:
1381 1381 self.cachestat = newstat
1382 1382 return True
1383 1383 else:
1384 1384 return False
1385 1385
1386 1386 @staticmethod
1387 1387 def stat(path):
1388 1388 try:
1389 1389 return util.cachestat(path)
1390 1390 except OSError as e:
1391 1391 if e.errno != errno.ENOENT:
1392 1392 raise
1393 1393
1394 1394 class filecacheentry(object):
1395 1395 def __init__(self, paths, stat=True):
1396 1396 self._entries = []
1397 1397 for path in paths:
1398 1398 self._entries.append(filecachesubentry(path, stat))
1399 1399
1400 1400 def changed(self):
1401 1401 '''true if any entry has changed'''
1402 1402 for entry in self._entries:
1403 1403 if entry.changed():
1404 1404 return True
1405 1405 return False
1406 1406
1407 1407 def refresh(self):
1408 1408 for entry in self._entries:
1409 1409 entry.refresh()
1410 1410
1411 1411 class filecache(object):
1412 1412 """A property like decorator that tracks files under .hg/ for updates.
1413 1413
1414 1414 On first access, the files defined as arguments are stat()ed and the
1415 1415 results cached. The decorated function is called. The results are stashed
1416 1416 away in a ``_filecache`` dict on the object whose method is decorated.
1417 1417
1418 1418 On subsequent access, the cached result is used as it is set to the
1419 1419 instance dictionary.
1420 1420
1421 1421 On external property set/delete operations, the caller must update the
1422 1422 corresponding _filecache entry appropriately. Use __class__.<attr>.set()
1423 1423 instead of directly setting <attr>.
1424 1424
1425 1425 When using the property API, the cached data is always used if available.
1426 1426 No stat() is performed to check if the file has changed.
1427 1427
1428 1428 Others can muck about with the state of the ``_filecache`` dict. e.g. they
1429 1429 can populate an entry before the property's getter is called. In this case,
1430 1430 entries in ``_filecache`` will be used during property operations,
1431 1431 if available. If the underlying file changes, it is up to external callers
1432 1432 to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
1433 1433 method result as well as possibly calling ``del obj._filecache[attr]`` to
1434 1434 remove the ``filecacheentry``.
1435 1435 """
1436 1436
1437 1437 def __init__(self, *paths):
1438 1438 self.paths = paths
1439 1439
1440 1440 def join(self, obj, fname):
1441 1441 """Used to compute the runtime path of a cached file.
1442 1442
1443 1443 Users should subclass filecache and provide their own version of this
1444 1444 function to call the appropriate join function on 'obj' (an instance
1445 1445 of the class whose member function was decorated).
1446 1446 """
1447 1447 raise NotImplementedError
1448 1448
1449 1449 def __call__(self, func):
1450 1450 self.func = func
1451 1451 self.sname = func.__name__
1452 1452 self.name = pycompat.sysbytes(self.sname)
1453 1453 return self
1454 1454
1455 1455 def __get__(self, obj, type=None):
1456 1456 # if accessed on the class, return the descriptor itself.
1457 1457 if obj is None:
1458 1458 return self
1459 1459
1460 1460 assert self.sname not in obj.__dict__
1461 1461
1462 1462 entry = obj._filecache.get(self.name)
1463 1463
1464 1464 if entry:
1465 1465 if entry.changed():
1466 1466 entry.obj = self.func(obj)
1467 1467 else:
1468 1468 paths = [self.join(obj, path) for path in self.paths]
1469 1469
1470 1470 # We stat -before- creating the object so our cache doesn't lie if
1471 1471 # a writer modified it between the time we read and stat
1472 1472 entry = filecacheentry(paths, True)
1473 1473 entry.obj = self.func(obj)
1474 1474
1475 1475 obj._filecache[self.name] = entry
1476 1476
1477 1477 obj.__dict__[self.sname] = entry.obj
1478 1478 return entry.obj
1479 1479
1480 1480 # don't implement __set__(), which would make __dict__ lookup as slow as
1481 1481 # function call.
1482 1482
1483 1483 def set(self, obj, value):
1484 1484 if self.name not in obj._filecache:
1485 1485 # we add an entry for the missing value because X in __dict__
1486 1486 # implies X in _filecache
1487 1487 paths = [self.join(obj, path) for path in self.paths]
1488 1488 ce = filecacheentry(paths, False)
1489 1489 obj._filecache[self.name] = ce
1490 1490 else:
1491 1491 ce = obj._filecache[self.name]
1492 1492
1493 1493 ce.obj = value # update cached copy
1494 1494 obj.__dict__[self.sname] = value # update copy returned by obj.x
1495 1495
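# Hedged sketch (an addition): the subclassing pattern the docstring above
# describes, with join() resolving paths against a hypothetical obj.vfs.
class _vfsfilecache(filecache):
    def join(self, obj, fname):
        return obj.vfs.join(fname)

class _somecachedobject(object):
    def __init__(self, vfs):
        self.vfs = vfs
        self._filecache = {}

    @_vfsfilecache('bookmarks')
    def bookmarks(self):
        # recomputed only when the 'bookmarks' file changes on disk
        return self.vfs.tryread('bookmarks')
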
1496 1496 def extdatasource(repo, source):
1497 1497 """Gather a map of rev -> value dict from the specified source
1498 1498
1499 1499 A source spec is treated as a URL, with a special case shell: type
1500 1500 for parsing the output from a shell command.
1501 1501
1502 1502 The data is parsed as a series of newline-separated records where
1503 1503 each record is a revision specifier optionally followed by a space
1504 1504 and a freeform string value. If the revision is known locally, it
1505 1505 is converted to a rev, otherwise the record is skipped.
1506 1506
1507 1507 Note that both key and value are treated as UTF-8 and converted to
1508 1508 the local encoding. This allows uniformity between local and
1509 1509 remote data sources.
1510 1510 """
1511 1511
1512 1512 spec = repo.ui.config("extdata", source)
1513 1513 if not spec:
1514 1514 raise error.Abort(_("unknown extdata source '%s'") % source)
1515 1515
1516 1516 data = {}
1517 1517 src = proc = None
1518 1518 try:
1519 1519 if spec.startswith("shell:"):
1520 1520 # external commands should be run relative to the repo root
1521 1521 cmd = spec[6:]
1522 1522 proc = subprocess.Popen(procutil.tonativestr(cmd),
1523 1523 shell=True, bufsize=-1,
1524 1524 close_fds=procutil.closefds,
1525 1525 stdout=subprocess.PIPE,
1526 1526 cwd=procutil.tonativestr(repo.root))
1527 1527 src = proc.stdout
1528 1528 else:
1529 1529 # treat as a URL or file
1530 1530 src = url.open(repo.ui, spec)
1531 1531 for l in src:
1532 1532 if " " in l:
1533 1533 k, v = l.strip().split(" ", 1)
1534 1534 else:
1535 1535 k, v = l.strip(), ""
1536 1536
1537 1537 k = encoding.tolocal(k)
1538 1538 try:
1539 1539 data[revsingle(repo, k).rev()] = encoding.tolocal(v)
1540 1540 except (error.LookupError, error.RepoLookupError):
1541 1541 pass # we ignore data for nodes that don't exist locally
1542 1542 finally:
1543 1543 if proc:
1544 1544 try:
1545 1545 proc.communicate()
1546 1546 except ValueError:
1547 1547 # This happens if we started iterating src and then
1548 1548 # get a parse error on a line. It should be safe to ignore.
1549 1549 pass
1550 1550 if src:
1551 1551 src.close()
1552 1552 if proc and proc.returncode != 0:
1553 1553 raise error.Abort(_("extdata command '%s' failed: %s")
1554 1554 % (cmd, procutil.explainexit(proc.returncode)))
1555 1555
1556 1556 return data
1557 1557
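# Illustrative sketch (an addition): a hypothetical [extdata] source and how
# it is consumed. With a config such as
#
#   [extdata]
#   bugzilla = shell:cat .hg/bugmap
#
# where each output line is '<revision> <free-form value>', the data can be
# fetched as a {rev: value} dict:
def _bugmap(somerepo):
    return extdatasource(somerepo, 'bugzilla')
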
1558 1558 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1559 1559 if lock is None:
1560 1560 raise error.LockInheritanceContractViolation(
1561 1561 'lock can only be inherited while held')
1562 1562 if environ is None:
1563 1563 environ = {}
1564 1564 with lock.inherit() as locker:
1565 1565 environ[envvar] = locker
1566 1566 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1567 1567
1568 1568 def wlocksub(repo, cmd, *args, **kwargs):
1569 1569 """run cmd as a subprocess that allows inheriting repo's wlock
1570 1570
1571 1571 This can only be called while the wlock is held. This takes all the
1572 1572 arguments that ui.system does, and returns the exit code of the
1573 1573 subprocess."""
1574 1574 return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
1575 1575 **kwargs)
1576 1576
1577 1577 class progress(object):
1578 1578 def __init__(self, ui, updatebar, topic, unit="", total=None):
1579 1579 self.ui = ui
1580 1580 self.pos = 0
1581 1581 self.topic = topic
1582 1582 self.unit = unit
1583 1583 self.total = total
1584 1584 self.debug = ui.configbool('progress', 'debug')
1585 1585 self._updatebar = updatebar
1586 1586
1587 1587 def __enter__(self):
1588 1588 return self
1589 1589
1590 1590 def __exit__(self, exc_type, exc_value, exc_tb):
1591 1591 self.complete()
1592 1592
1593 1593 def update(self, pos, item="", total=None):
1594 1594 assert pos is not None
1595 1595 if total:
1596 1596 self.total = total
1597 1597 self.pos = pos
1598 1598 self._updatebar(self.topic, self.pos, item, self.unit, self.total)
1599 1599 if self.debug:
1600 1600 self._printdebug(item)
1601 1601
1602 1602 def increment(self, step=1, item="", total=None):
1603 1603 self.update(self.pos + step, item, total)
1604 1604
1605 1605 def complete(self):
1606 1606 self.pos = None
1607 1607 self.unit = ""
1608 1608 self.total = None
1609 1609 self._updatebar(self.topic, self.pos, "", self.unit, self.total)
1610 1610
1611 1611 def _printdebug(self, item):
1612 1612 if self.unit:
1613 1613 unit = ' ' + self.unit
1614 1614 if item:
1615 1615 item = ' ' + item
1616 1616
1617 1617 if self.total:
1618 1618 pct = 100.0 * self.pos / self.total
1619 1619 self.ui.debug('%s:%s %d/%d%s (%4.2f%%)\n'
1620 1620 % (self.topic, item, self.pos, self.total, unit, pct))
1621 1621 else:
1622 1622 self.ui.debug('%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
1623 1623
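# Hedged usage sketch (an addition): the context-manager protocol of the
# progress class above; 'updatebar' would normally be supplied by the ui
# layer, and 'files' is hypothetical.
def _copyfiles(ui, updatebar, files):
    with progress(ui, updatebar, 'copying', unit='files',
                  total=len(files)) as prog:
        for i, f in enumerate(files):
            prog.update(i, item=f)
        # complete() runs automatically on __exit__
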
1624 1624 def gdinitconfig(ui):
1625 1625 """helper function to know if a repo should be created as general delta
1626 1626 """
1627 1627 # experimental config: format.generaldelta
1628 1628 return (ui.configbool('format', 'generaldelta')
1629 1629 or ui.configbool('format', 'usegeneraldelta'))
1630 1630
1631 1631 def gddeltaconfig(ui):
1632 1632 """helper function to know if incoming delta should be optimised
1633 1633 """
1634 1634 # experimental config: format.generaldelta
1635 1635 return ui.configbool('format', 'generaldelta')
1636 1636
1637 1637 class simplekeyvaluefile(object):
1638 1638 """A simple file with key=value lines
1639 1639
1640 1640 Keys must be alphanumeric and start with a letter, values must not
1641 1641 contain '\n' characters"""
1642 1642 firstlinekey = '__firstline'
1643 1643
1644 1644 def __init__(self, vfs, path, keys=None):
1645 1645 self.vfs = vfs
1646 1646 self.path = path
1647 1647
1648 1648 def read(self, firstlinenonkeyval=False):
1649 1649 """Read the contents of a simple key-value file
1650 1650
1651 1651 'firstlinenonkeyval' indicates whether the first line of file should
1652 1652 be treated as a key-value pair or returned fully under the
1653 1653 __firstline key."""
1654 1654 lines = self.vfs.readlines(self.path)
1655 1655 d = {}
1656 1656 if firstlinenonkeyval:
1657 1657 if not lines:
1658 1658 e = _("empty simplekeyvalue file")
1659 1659 raise error.CorruptedState(e)
1660 1660 # we don't want to include '\n' in the __firstline
1661 1661 d[self.firstlinekey] = lines[0][:-1]
1662 1662 del lines[0]
1663 1663
1664 1664 try:
1665 1665 # the 'if line.strip()' part prevents us from failing on empty
1666 1666 # lines which only contain '\n' and therefore are not skipped
1667 1667 # by 'if line'
1668 1668 updatedict = dict(line[:-1].split('=', 1) for line in lines
1669 1669 if line.strip())
1670 1670 if self.firstlinekey in updatedict:
1671 1671 e = _("%r can't be used as a key")
1672 1672 raise error.CorruptedState(e % self.firstlinekey)
1673 1673 d.update(updatedict)
1674 1674 except ValueError as e:
1675 1675 raise error.CorruptedState(str(e))
1676 1676 return d
1677 1677
1678 1678 def write(self, data, firstline=None):
1679 1679 """Write key=>value mapping to a file
1680 1680 data is a dict. Keys must be alphanumeric and start with a letter.
1681 1681 Values must not contain newline characters.
1682 1682
1683 1683 If 'firstline' is not None, it is written to file before
1684 1684 everything else, as it is, not in a key=value form"""
1685 1685 lines = []
1686 1686 if firstline is not None:
1687 1687 lines.append('%s\n' % firstline)
1688 1688
1689 1689 for k, v in data.items():
1690 1690 if k == self.firstlinekey:
1691 1691 e = "key name '%s' is reserved" % self.firstlinekey
1692 1692 raise error.ProgrammingError(e)
1693 1693 if not k[0:1].isalpha():
1694 1694 e = "keys must start with a letter in a key-value file"
1695 1695 raise error.ProgrammingError(e)
1696 1696 if not k.isalnum():
1697 1697 e = "invalid key name in a simple key-value file"
1698 1698 raise error.ProgrammingError(e)
1699 1699 if '\n' in v:
1700 1700 e = "invalid value in a simple key-value file"
1701 1701 raise error.ProgrammingError(e)
1702 1702 lines.append("%s=%s\n" % (k, v))
1703 1703 with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
1704 1704 fp.write(''.join(lines))
1705 1705
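# Illustrative sketch (an addition): round-tripping a small state file
# through simplekeyvaluefile. 'somevfs' is a hypothetical vfs object.
def _statefileexample(somevfs):
    f = simplekeyvaluefile(somevfs, 'mystate')
    f.write({'version': '1', 'inprogress': 'yes'}, firstline='1')
    data = f.read(firstlinenonkeyval=True)
    # data == {'__firstline': '1', 'version': '1', 'inprogress': 'yes'}
    return data
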
1706 1706 _reportobsoletedsource = [
1707 1707 'debugobsolete',
1708 1708 'pull',
1709 1709 'push',
1710 1710 'serve',
1711 1711 'unbundle',
1712 1712 ]
1713 1713
1714 1714 _reportnewcssource = [
1715 1715 'pull',
1716 1716 'unbundle',
1717 1717 ]
1718 1718
1719 1719 def prefetchfiles(repo, revs, match):
1720 1720 """Invokes the registered file prefetch functions, allowing extensions to
1721 1721 ensure the corresponding files are available locally, before the command
1722 1722 uses them."""
1723 1723 if match:
1724 1724 # The command itself will complain about files that don't exist, so
1725 1725 # don't duplicate the message.
1726 1726 match = matchmod.badmatch(match, lambda fn, msg: None)
1727 1727 else:
1728 1728 match = matchall(repo)
1729 1729
1730 1730 fileprefetchhooks(repo, revs, match)
1731 1731
1732 1732 # a list of (repo, revs, match) prefetch functions
1733 1733 fileprefetchhooks = util.hooks()
1734 1734
1735 1735 # A marker that tells the evolve extension to suppress its own reporting
1736 1736 _reportstroubledchangesets = True
1737 1737
1738 1738 def registersummarycallback(repo, otr, txnname=''):
1739 1739 """register a callback to issue a summary after the transaction is closed
1740 1740 """
1741 1741 def txmatch(sources):
1742 1742 return any(txnname.startswith(source) for source in sources)
1743 1743
1744 1744 categories = []
1745 1745
1746 1746 def reportsummary(func):
1747 1747 """decorator for report callbacks."""
1748 1748 # The repoview life cycle is shorter than the one of the actual
1749 1749 # underlying repository. So the filtered object can die before the
1750 1750 # weakref is used, leading to trouble. We keep a reference to the
1751 1751 # unfiltered object and restore the filtering when retrieving the
1752 1752 # repository through the weakref.
1753 1753 filtername = repo.filtername
1754 1754 reporef = weakref.ref(repo.unfiltered())
1755 1755 def wrapped(tr):
1756 1756 repo = reporef()
1757 1757 if filtername:
1758 1758 repo = repo.filtered(filtername)
1759 1759 func(repo, tr)
1760 1760 newcat = '%02i-txnreport' % len(categories)
1761 1761 otr.addpostclose(newcat, wrapped)
1762 1762 categories.append(newcat)
1763 1763 return wrapped
1764 1764
1765 1765 if txmatch(_reportobsoletedsource):
1766 1766 @reportsummary
1767 1767 def reportobsoleted(repo, tr):
1768 1768 obsoleted = obsutil.getobsoleted(repo, tr)
1769 1769 if obsoleted:
1770 1770 repo.ui.status(_('obsoleted %i changesets\n')
1771 1771 % len(obsoleted))
1772 1772
1773 1773 if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
1774 1774 repo.ui.configbool('experimental', 'evolution.report-instabilities')):
1775 1775 instabilitytypes = [
1776 1776 ('orphan', 'orphan'),
1777 1777 ('phase-divergent', 'phasedivergent'),
1778 1778 ('content-divergent', 'contentdivergent'),
1779 1779 ]
1780 1780
1781 1781 def getinstabilitycounts(repo):
1782 1782 filtered = repo.changelog.filteredrevs
1783 1783 counts = {}
1784 1784 for instability, revset in instabilitytypes:
1785 1785 counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
1786 1786 filtered)
1787 1787 return counts
1788 1788
1789 1789 oldinstabilitycounts = getinstabilitycounts(repo)
1790 1790 @reportsummary
1791 1791 def reportnewinstabilities(repo, tr):
1792 1792 newinstabilitycounts = getinstabilitycounts(repo)
1793 1793 for instability, revset in instabilitytypes:
1794 1794 delta = (newinstabilitycounts[instability] -
1795 1795 oldinstabilitycounts[instability])
1796 1796 msg = getinstabilitymessage(delta, instability)
1797 1797 if msg:
1798 1798 repo.ui.warn(msg)
1799 1799
1800 1800 if txmatch(_reportnewcssource):
1801 1801 @reportsummary
1802 1802 def reportnewcs(repo, tr):
1803 1803 """Report the range of new revisions pulled/unbundled."""
1804 1804 origrepolen = tr.changes.get('origrepolen', len(repo))
1805 1805 unfi = repo.unfiltered()
1806 1806 if origrepolen >= len(unfi):
1807 1807 return
1808 1808
1809 1809 # Compute the bounds of new visible revisions' range.
1810 1810 revs = smartset.spanset(repo, start=origrepolen)
1811 1811 if revs:
1812 1812 minrev, maxrev = repo[revs.min()], repo[revs.max()]
1813 1813
1814 1814 if minrev == maxrev:
1815 1815 revrange = minrev
1816 1816 else:
1817 1817 revrange = '%s:%s' % (minrev, maxrev)
1818 1818 draft = len(repo.revs('%ld and draft()', revs))
1819 1819 secret = len(repo.revs('%ld and secret()', revs))
1820 1820 if not (draft or secret):
1821 1821 msg = _('new changesets %s\n') % revrange
1822 1822 elif draft and secret:
1823 1823 msg = _('new changesets %s (%d drafts, %d secrets)\n')
1824 1824 msg %= (revrange, draft, secret)
1825 1825 elif draft:
1826 1826 msg = _('new changesets %s (%d drafts)\n')
1827 1827 msg %= (revrange, draft)
1828 1828 elif secret:
1829 1829 msg = _('new changesets %s (%d secrets)\n')
1830 1830 msg %= (revrange, secret)
1831 1831 else:
1832 1832 errormsg = 'entered unreachable condition'
1833 1833 raise error.ProgrammingError(errormsg)
1834 1834 repo.ui.status(msg)
1835 1835
1836 1836 # search new changesets directly pulled as obsolete
1837 1837 duplicates = tr.changes.get('revduplicates', ())
1838 1838 obsadded = unfi.revs('(%d: + %ld) and obsolete()',
1839 1839 origrepolen, duplicates)
1840 1840 cl = repo.changelog
1841 1841 extinctadded = [r for r in obsadded if r not in cl]
1842 1842 if extinctadded:
1843 1843 # They are not just obsolete, but obsolete and invisible
1844 1844 # we call them "extinct" internally but the term has not been
1845 1845 # exposed to users.
1846 1846 msg = '(%d other changesets obsolete on arrival)\n'
1847 1847 repo.ui.status(msg % len(extinctadded))
1848 1848
1849 1849 @reportsummary
1850 1850 def reportphasechanges(repo, tr):
1851 1851 """Report statistics of phase changes for changesets pre-existing
1852 1852 pull/unbundle.
1853 1853 """
1854 1854 origrepolen = tr.changes.get('origrepolen', len(repo))
1855 1855 phasetracking = tr.changes.get('phases', {})
1856 1856 if not phasetracking:
1857 1857 return
1858 1858 published = [
1859 1859 rev for rev, (old, new) in phasetracking.iteritems()
1860 1860 if new == phases.public and rev < origrepolen
1861 1861 ]
1862 1862 if not published:
1863 1863 return
1864 1864 repo.ui.status(_('%d local changesets published\n')
1865 1865 % len(published))
1866 1866
1867 1867 def getinstabilitymessage(delta, instability):
1868 1868 """function to return the message to show warning about new instabilities
1869 1869
1870 1870 exists as a separate function so that extension can wrap to show more
1871 1871 information like how to fix instabilities"""
1872 1872 if delta > 0:
1873 1873 return _('%i new %s changesets\n') % (delta, instability)
1874 1874
1875 1875 def nodesummaries(repo, nodes, maxnumnodes=4):
1876 1876 if len(nodes) <= maxnumnodes or repo.ui.verbose:
1877 1877 return ' '.join(short(h) for h in nodes)
1878 1878 first = ' '.join(short(h) for h in nodes[:maxnumnodes])
1879 1879 return _("%s and %d others") % (first, len(nodes) - maxnumnodes)
1880 1880
1881 1881 def enforcesinglehead(repo, tr, desc):
1882 1882 """check that no named branch has multiple heads"""
1883 1883 if desc in ('strip', 'repair'):
1884 1884 # skip the logic during strip
1885 1885 return
1886 1886 visible = repo.filtered('visible')
1887 1887 # possible improvement: we could restrict the check to affected branch
1888 1888 for name, heads in visible.branchmap().iteritems():
1889 1889 if len(heads) > 1:
1890 1890 msg = _('rejecting multiple heads on branch "%s"')
1891 1891 msg %= name
1892 1892 hint = _('%d heads: %s')
1893 1893 hint %= (len(heads), nodesummaries(repo, heads))
1894 1894 raise error.Abort(msg, hint=hint)
1895 1895
1896 1896 def wrapconvertsink(sink):
1897 1897 """Allow extensions to wrap the sink returned by convcmd.convertsink()
1898 1898 before it is used, whether or not the convert extension was formally loaded.
1899 1899 """
1900 1900 return sink
1901 1901
1902 1902 def unhidehashlikerevs(repo, specs, hiddentype):
1903 1903 """parse the user specs and unhide changesets whose hash or revision number
1904 1904 is passed.
1905 1905
1906 1906 hiddentype can be: 1) 'warn': warn while unhiding changesets
1907 1907 2) 'nowarn': don't warn while unhiding changesets
1908 1908
1909 1909 returns a repo object with the required changesets unhidden
1910 1910 """
1911 1911 if not repo.filtername or not repo.ui.configbool('experimental',
1912 1912 'directaccess'):
1913 1913 return repo
1914 1914
1915 1915 if repo.filtername not in ('visible', 'visible-hidden'):
1916 1916 return repo
1917 1917
1918 1918 symbols = set()
1919 1919 for spec in specs:
1920 1920 try:
1921 1921 tree = revsetlang.parse(spec)
1922 1922 except error.ParseError: # will be reported by scmutil.revrange()
1923 1923 continue
1924 1924
1925 1925 symbols.update(revsetlang.gethashlikesymbols(tree))
1926 1926
1927 1927 if not symbols:
1928 1928 return repo
1929 1929
1930 1930 revs = _getrevsfromsymbols(repo, symbols)
1931 1931
1932 1932 if not revs:
1933 1933 return repo
1934 1934
1935 1935 if hiddentype == 'warn':
1936 1936 unfi = repo.unfiltered()
1937 1937 revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
1938 1938 repo.ui.warn(_("warning: accessing hidden changesets for write "
1939 1939 "operation: %s\n") % revstr)
1940 1940
1941 1941 # we have to use a new filtername to separate branch/tags caches until we
1942 1942 # can disable these caches when revisions are dynamically pinned.
1943 1943 return repo.filtered('visible-hidden', revs)
1944 1944
1945 1945 def _getrevsfromsymbols(repo, symbols):
1946 1946 """parse the list of symbols and returns a set of revision numbers of hidden
1947 1947 changesets present in symbols"""
1948 1948 revs = set()
1949 1949 unfi = repo.unfiltered()
1950 1950 unficl = unfi.changelog
1951 1951 cl = repo.changelog
1952 1952 tiprev = len(unficl)
1953 1953 allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
1954 1954 for s in symbols:
1955 1955 try:
1956 1956 n = int(s)
1957 1957 if n <= tiprev:
1958 1958 if not allowrevnums:
1959 1959 continue
1960 1960 else:
1961 1961 if n not in cl:
1962 1962 revs.add(n)
1963 1963 continue
1964 1964 except ValueError:
1965 1965 pass
1966 1966
1967 1967 try:
1968 1968 s = resolvehexnodeidprefix(unfi, s)
1969 1969 except (error.LookupError, error.WdirUnsupported):
1970 1970 s = None
1971 1971
1972 1972 if s is not None:
1973 1973 rev = unficl.rev(s)
1974 1974 if rev not in cl:
1975 1975 revs.add(rev)
1976 1976
1977 1977 return revs
1978 1978
1979 1979 def bookmarkrevs(repo, mark):
1980 1980 """
1981 1981 Select revisions reachable by a given bookmark
1982 1982 """
1983 1983 return repo.revs("ancestors(bookmark(%s)) - "
1984 1984 "ancestors(head() and not bookmark(%s)) - "
1985 1985 "ancestors(bookmark() and not bookmark(%s))",
1986 1986 mark, mark, mark)
1987
1988 def computechangesetfilesadded(ctx):
1989 """return the list of files added in a changeset
1990 """
1991 added = []
1992 for f in ctx.files():
1993 if not any(f in p for p in ctx.parents()):
1994 added.append(f)
1995 return added
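
# Hedged usage sketch (an addition): applying the new helper to a changeset;
# 'somerepo' and 'rev' are hypothetical.
def _addedfilesexample(somerepo, rev):
    ctx = somerepo[rev]
    # a file counts as added when no parent manifest contains it
    return computechangesetfilesadded(ctx)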