# Source provenance: diff view of mercurial/context.py from Mercurial changeset
# r34927:f7e4d6c2 (stable branch), "statichttprepo: prevent loading dirstate
# over HTTP on node lookup (issue5717)", authored by Yuya Nishihara.
@@ -1,2602 +1,2604 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import re
14 14 import stat
15 15
16 16 from .i18n import _
17 17 from .node import (
18 18 addednodeid,
19 19 bin,
20 20 hex,
21 21 modifiednodeid,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 wdirid,
26 26 wdirnodes,
27 27 wdirrev,
28 28 )
29 29 from .thirdparty import (
30 30 attr,
31 31 )
32 32 from . import (
33 33 encoding,
34 34 error,
35 35 fileset,
36 36 match as matchmod,
37 37 mdiff,
38 38 obsolete as obsmod,
39 39 patch,
40 40 pathutil,
41 41 phases,
42 42 pycompat,
43 43 repoview,
44 44 revlog,
45 45 scmutil,
46 46 sparse,
47 47 subrepo,
48 48 util,
49 49 )
50 50
# Convenience alias for the lazily-computed attribute decorator used
# throughout this module (see the @propertycache usages below).
propertycache = util.propertycache

# Matcher returning a truthy value when a byte string contains anything
# outside printable ASCII (0x21-0x7f). Used during failed revision lookups to
# decide whether a 20-byte changeid is binary and should be hexlified before
# being embedded in an error message.
nonascii = re.compile(r'[^\x21-\x7f]').search
54 54
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # Contexts are immutable, so an existing context passed as changeid
        # can simply be returned as-is instead of building a new object.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Equal when exactly the same context type refers to the same rev.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # subrepo state ({path: (source, node, kind)}) parsed lazily
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        msg = ("'context.unstable' is deprecated, "
               "use 'context.orphan'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.orphan()

    def orphan(self):
        """True if the changeset is not obsolete but its ancestors are"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def bumped(self):
        msg = ("'context.bumped' is deprecated, "
               "use 'context.phasedivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.phasedivergent()

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def divergent(self):
        msg = ("'context.divergent' is deprecated, "
               "use 'context.contentdivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.contentdivergent()

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def troubled(self):
        msg = ("'context.troubled' is deprecated, "
               "use 'context.isunstable'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.isunstable()

    def isunstable(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def troubles(self):
        """Keep the old version around in order to avoid breaking extensions
        about different return values.
        """
        msg = ("'context.troubles' is deprecated, "
               "use 'context.instabilities'")
        self._repo.ui.deprecwarn(msg, '4.4')

        troubles = []
        if self.orphan():
            troubles.append('orphan')
        if self.phasedivergent():
            troubles.append('bumped')
        if self.contentdivergent():
            troubles.append('divergent')
        return troubles

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # return the null changectx when there is no second parent
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        """return (filenode, flags) for path, raising ManifestLookupError if
        the path is not present; prefers already-loaded manifest data."""
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        #
        # NOTE: 'swapped' used to be named 'reversed', shadowing the builtin.
        swapped = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            swapped = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if swapped:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
429 429
def _filterederror(repo, changeid):
    """return the exception to raise for a filtered changeid lookup

    Kept as a standalone function so extensions (eg: evolve) can
    experiment with alternative message variants."""
    # anything other than a 'visible*' filter gets the generic message
    if not repo.filtername.startswith('visible'):
        msg = _("filtered revision '%s' (not in '%s' subset)")
        msg %= (changeid, repo.filtername)
        return error.FilteredRepoLookupError(msg)
    # hidden-revision case: suggest --hidden as a way out
    msg = _("hidden revision '%s'") % changeid
    hint = _('use --hidden to access hidden revisions')
    return error.FilteredRepoLookupError(msg, hint=hint)
442 442
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        # Resolution order: int rev, 'null', 'tip', '.', binary node,
        # numeric string, hex node, names (tags/bookmarks/branches),
        # and finally unambiguous node prefix.
        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if not pycompat.ispy3 and isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            # Only consult the dirstate on local repos: on statichttp repos
            # reading the dirstate would trigger an HTTP request (issue5717).
            if (changeid == '.'
                or repo.local() and changeid == repo.dirstate.p1()):
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            # (repo.local() guard: don't fetch a dirstate over HTTP,
            # see issue5717)
            if (repo.local()
                and changeid in repo.unfiltered().dirstate.parents()):
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # hexlify binary-looking changeids for a readable message
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        # a single-element list when the second parent is null
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        return self._changeset.extra
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
706 708
707 709 class basefilectx(object):
708 710 """A filecontext object represents the common logic for its children:
709 711 filectx: read-only access to a filerevision that is already present
710 712 in the repo,
711 713 workingfilectx: a filecontext that represents files from the working
712 714 directory,
713 715 memfilectx: a filecontext that represents files in-memory,
714 716 overlayfilectx: duplicate another filecontext with some fields overridden.
715 717 """
    @propertycache
    def _filelog(self):
        # filelog (revlog) holding the history of this file's path
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        """Changelog revision this file context is associated with.

        Resolution order: an explicitly supplied _changeid, then the
        attached changectx, then a lazy linkrev adjustment based on a
        known descendant, and finally the raw filelog linkrev.
        """
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        # prefer an explicitly supplied file id over the changectx lookup
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        # filelog-local revision number of this file revision
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        # repo-relative path; same as _path for plain file contexts
        return self._path
747 749
    def __nonzero__(self):
        # a file context is truthy when the file revision actually exists
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            # changeset lookup failed: show the path with an unknown revision
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            # file data unavailable: fall back to identity hashing
            return id(self)

    def __eq__(self, other):
        # equal when same type, same path and same file node
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
784 786
    # Simple accessors. Most delegate to the associated changectx; note that
    # rev() returns the (possibly adjusted) _changeid while linkrev() always
    # returns the raw filelog linkrev, which may differ (linkrev aliasing).
    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        # set by subclasses; rename info or a false value — TODO confirm shape
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path
833 835
    def isbinary(self):
        # treat I/O errors (e.g. unreadable data) as "not binary"
        try:
            return util.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        # 'x' flag: executable bit set
        return 'x' in self.flags()
    def islink(self):
        # 'l' flag: symbolic link
        return 'l' in self.flags()
843 845
    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        # base implementation: a regular file context always has the file
        return False
850 852
    # subclasses with a custom comparison set this to True so cmp() below
    # delegates the comparison to them
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        # Only fall through to the (expensive) content comparison when the
        # size check cannot rule out equality.
        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True
869 871
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
            return lkr
915 917
916 918 def introrev(self):
917 919 """return the rev of the changeset which introduced this file revision
918 920
919 921 This method is different from linkrev because it take into account the
920 922 changeset the filectx was created from. It ensures the returned
921 923 revision is one of its ancestors. This prevents bugs from
922 924 'linkrev-shadowing' when a file revision is used by multiple
923 925 changesets.
924 926 """
925 927 lkr = self.linkrev()
926 928 attrs = vars(self)
927 929 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
928 930 if noctx or self.rev() == lkr:
929 931 return self.linkrev()
930 932 return self._adjustlinkrev(self.rev(), inclusive=True)
931 933
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
947 949
948 950 def parents(self):
949 951 _path = self._path
950 952 fl = self._filelog
951 953 parents = self._filelog.parents(self._filenode)
952 954 pl = [(_path, node, fl) for node in parents if node != nullid]
953 955
954 956 r = fl.renamed(self._filenode)
955 957 if r:
956 958 # - In the simple rename case, both parent are nullid, pl is empty.
957 959 # - In case of merge, only one of the parent is null id and should
958 960 # be replaced with the rename information. This parent is -always-
959 961 # the first one.
960 962 #
961 963 # As null id have always been filtered out in the previous list
962 964 # comprehension, inserting to 0 will always result in "replacing
963 965 # first nullid parent with rename information.
964 966 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
965 967
966 968 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
967 969
968 970 def p1(self):
969 971 return self.parents()[0]
970 972
971 973 def p2(self):
972 974 p = self.parents()
973 975 if len(p) == 2:
974 976 return p[1]
975 977 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
976 978
    def annotate(self, follow=False, linenumber=False, skiprevs=None,
                 diffopts=None):
        '''returns a list of tuples of ((ctx, number), line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed; if linenumber parameter is true, number is
        the line number at the first appearance in the managed file, otherwise,
        number has a fixed value of False.

        If follow is true, renames are followed across copies. skiprevs, when
        given, is a container of changeids whose lines should be re-blamed on
        their parents (see _annotatepair); diffopts is forwarded to the diff
        block matcher.
        '''

        def lines(text):
            # number of lines in text; a non-empty text without a trailing
            # newline still counts as one (partial) last line
            if text.endswith("\n"):
                return text.count("\n")
            return text.count("\n") + int(bool(text))

        if linenumber:
            def decorate(text, rev):
                # one annotateline per line, carrying its 1-based line number
                return ([annotateline(fctx=rev, lineno=i)
                         for i in xrange(1, lines(text) + 1)], text)
        else:
            def decorate(text, rev):
                # line numbers not requested: all lines share one record shape
                return ([annotateline(fctx=rev)] * lines(text), text)

        # cache filelog lookups; renamed parents hit many distinct paths
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = self.filectx(self.filenode(), changeid=introrev)
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if introrev is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([introrev], inclusive=True)
            base._ancestrycontext = ac

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        # 1st DFS pre-calculates pcache and needed
        # pcache: filectx -> list of parent filectxs
        # needed: reference count so per-parent history can be freed early
        visit = [base]
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit.pop()
            if f in pcache:
                continue
            pl = parents(f)
            pcache[f] = pl
            for p in pl:
                needed[p] = needed.get(p, 0) + 1
                if p not in pcache:
                    visit.append(p)

        # 2nd DFS does the actual annotate
        visit[:] = [base]
        hist = {}
        while visit:
            f = visit[-1]
            if f in hist:
                visit.pop()
                continue

            # only annotate f once all of its parents have been annotated
            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
            if ready:
                visit.pop()
                curr = decorate(f.data(), f)
                skipchild = False
                if skiprevs is not None:
                    skipchild = f._changeid in skiprevs
                curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
                                     diffopts)
                for p in pl:
                    # drop parent annotations as soon as nothing needs them,
                    # keeping peak memory bounded
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                del pcache[f]

        return zip(hist[base][0], hist[base][1].splitlines(True))
1090 1092
1091 1093 def ancestors(self, followfirst=False):
1092 1094 visit = {}
1093 1095 c = self
1094 1096 if followfirst:
1095 1097 cut = 1
1096 1098 else:
1097 1099 cut = None
1098 1100
1099 1101 while True:
1100 1102 for parent in c.parents()[:cut]:
1101 1103 visit[(parent.linkrev(), parent.filenode())] = parent
1102 1104 if not visit:
1103 1105 break
1104 1106 c = visit.pop(max(visit))
1105 1107 yield c
1106 1108
1107 1109 def decodeddata(self):
1108 1110 """Returns `data()` after running repository decoding filters.
1109 1111
1110 1112 This is often equivalent to how the data would be expressed on disk.
1111 1113 """
1112 1114 return self._repo.wwritedata(self.path(), self.data())
1113 1115
@attr.s(slots=True, frozen=True)
class annotateline(object):
    """Immutable record tying a single annotated line to its origin."""
    # filectx of the revision this line is blamed on
    fctx = attr.ib()
    # line number at first appearance in the managed file, or False when
    # line numbers were not requested (see basefilectx.annotate)
    lineno = attr.ib(default=False)
    # Whether this annotation was the result of a skip-annotate.
    skip = attr.ib(default=False)
1120 1122
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    `parents` and `child` are (annotateline list, text) pairs as produced by
    the `decorate` closures in basefilectx.annotate; the (possibly mutated)
    `child` pair is returned.

    See test-annotate.py for unit tests.
    '''
    # pair each parent with the diff blocks between its text and the child's
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    # parent hunk is at least as long: map child lines 1:1
                    for bk in xrange(b1, b2):
                        if child[0][bk].fctx == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = attr.evolve(parent[0][ak], skip=True)
                else:
                    # parent hunk too short: defer to the second pass below
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk].fctx == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = attr.evolve(parent[0][ak], skip=True)
    return child
1181 1183
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node.

        At least one of changeid, fileid or changectx must be supplied;
        filelog may be passed to avoid re-opening the file's revlog.
        """
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # only seed the lazily-computed attributes that were explicitly
        # provided; the rest are derived on demand via propertycache
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        # changectx for this file revision, derived from self._changeid
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        """return the raw (undecoded) revlog payload for this file revision"""
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """return the file content, aborting on censored nodes unless the
        censor.policy config is set to "ignore" (then returns '')"""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        """return the size of this file revision as reported by the filelog"""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        # the linkrev points elsewhere: only report the copy if neither
        # changeset parent already contains this exact file revision
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                # parent does not have the file at all; keep looking
                pass
        return renamed

    def children(self):
        """return filectxs for the filelog children of this file revision"""
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1287 1289
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        """text is the commit message; user/date/changes, when given,
        pre-seed the corresponding lazily-computed attributes."""
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            # default the branch to the dirstate's current branch
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # "<first parent>+" marks this as an uncommitted context
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        # file-flag lookup, preferring the dirstate's native support and
        # falling back to _buildflagfunc when the filesystem lacks it
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # repository status, computed on first use unless `changes` was
        # passed to __init__
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        # honor a devel.default-date override before falling back to "now"
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        # uncommitted contexts have no recorded subrepo revision
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        """return the sorted list of files touched by this context"""
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        """return the union of the parents' bookmarks"""
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        """return the phase this commit would get: at least draft, and no
        lower than any parent's phase"""
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """return the flags ('l', 'x' or '') recorded for path"""
        if r'_manifest' in self.__dict__:
            # a manifest has already been built: trust it
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        """return the sorted dirstate entries matching `match`"""
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        """yield the parents, then every ancestor changectx"""
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1488 1490
1489 1491 class workingctx(committablectx):
1490 1492 """A workingctx object makes access to data related to
1491 1493 the current working directory convenient.
1492 1494 date - any valid date string or (unixtime, offset), or None.
1493 1495 user - username string, or None.
1494 1496 extra - a dictionary of extra values, or None.
1495 1497 changes - a list of file lists as returned by localrepo.status()
1496 1498 or None to use the repository status.
1497 1499 """
1498 1500 def __init__(self, repo, text="", user=None, date=None, extra=None,
1499 1501 changes=None):
1500 1502 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1501 1503
1502 1504 def __iter__(self):
1503 1505 d = self._repo.dirstate
1504 1506 for f in d:
1505 1507 if d[f] != 'r':
1506 1508 yield f
1507 1509
1508 1510 def __contains__(self, key):
1509 1511 return self._repo.dirstate[key] not in "?r"
1510 1512
1511 1513 def hex(self):
1512 1514 return hex(wdirid)
1513 1515
1514 1516 @propertycache
1515 1517 def _parents(self):
1516 1518 p = self._repo.dirstate.parents()
1517 1519 if p[1] == nullid:
1518 1520 p = p[:-1]
1519 1521 return [changectx(self._repo, x) for x in p]
1520 1522
1521 1523 def filectx(self, path, filelog=None):
1522 1524 """get a file context from the working directory"""
1523 1525 return workingfilectx(self._repo, path, workingctx=self,
1524 1526 filelog=filelog)
1525 1527
1526 1528 def dirty(self, missing=False, merge=True, branch=True):
1527 1529 "check whether a working directory is modified"
1528 1530 # check subrepos first
1529 1531 for s in sorted(self.substate):
1530 1532 if self.sub(s).dirty(missing=missing):
1531 1533 return True
1532 1534 # check current working dir
1533 1535 return ((merge and self.p2()) or
1534 1536 (branch and self.branch() != self.p1().branch()) or
1535 1537 self.modified() or self.added() or self.removed() or
1536 1538 (missing and self.deleted()))
1537 1539
1538 1540 def add(self, list, prefix=""):
1539 1541 with self._repo.wlock():
1540 1542 ui, ds = self._repo.ui, self._repo.dirstate
1541 1543 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1542 1544 rejected = []
1543 1545 lstat = self._repo.wvfs.lstat
1544 1546 for f in list:
1545 1547 # ds.pathto() returns an absolute file when this is invoked from
1546 1548 # the keyword extension. That gets flagged as non-portable on
1547 1549 # Windows, since it contains the drive letter and colon.
1548 1550 scmutil.checkportable(ui, os.path.join(prefix, f))
1549 1551 try:
1550 1552 st = lstat(f)
1551 1553 except OSError:
1552 1554 ui.warn(_("%s does not exist!\n") % uipath(f))
1553 1555 rejected.append(f)
1554 1556 continue
1555 1557 if st.st_size > 10000000:
1556 1558 ui.warn(_("%s: up to %d MB of RAM may be required "
1557 1559 "to manage this file\n"
1558 1560 "(use 'hg revert %s' to cancel the "
1559 1561 "pending addition)\n")
1560 1562 % (f, 3 * st.st_size // 1000000, uipath(f)))
1561 1563 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1562 1564 ui.warn(_("%s not added: only files and symlinks "
1563 1565 "supported currently\n") % uipath(f))
1564 1566 rejected.append(f)
1565 1567 elif ds[f] in 'amn':
1566 1568 ui.warn(_("%s already tracked!\n") % uipath(f))
1567 1569 elif ds[f] == 'r':
1568 1570 ds.normallookup(f)
1569 1571 else:
1570 1572 ds.add(f)
1571 1573 return rejected
1572 1574
1573 1575 def forget(self, files, prefix=""):
1574 1576 with self._repo.wlock():
1575 1577 ds = self._repo.dirstate
1576 1578 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1577 1579 rejected = []
1578 1580 for f in files:
1579 1581 if f not in self._repo.dirstate:
1580 1582 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1581 1583 rejected.append(f)
1582 1584 elif self._repo.dirstate[f] != 'a':
1583 1585 self._repo.dirstate.remove(f)
1584 1586 else:
1585 1587 self._repo.dirstate.drop(f)
1586 1588 return rejected
1587 1589
1588 1590 def undelete(self, list):
1589 1591 pctxs = self.parents()
1590 1592 with self._repo.wlock():
1591 1593 ds = self._repo.dirstate
1592 1594 for f in list:
1593 1595 if self._repo.dirstate[f] != 'r':
1594 1596 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1595 1597 else:
1596 1598 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1597 1599 t = fctx.data()
1598 1600 self._repo.wwrite(f, t, fctx.flags())
1599 1601 self._repo.dirstate.normal(f)
1600 1602
1601 1603 def copy(self, source, dest):
1602 1604 try:
1603 1605 st = self._repo.wvfs.lstat(dest)
1604 1606 except OSError as err:
1605 1607 if err.errno != errno.ENOENT:
1606 1608 raise
1607 1609 self._repo.ui.warn(_("%s does not exist!\n")
1608 1610 % self._repo.dirstate.pathto(dest))
1609 1611 return
1610 1612 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1611 1613 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1612 1614 "symbolic link\n")
1613 1615 % self._repo.dirstate.pathto(dest))
1614 1616 else:
1615 1617 with self._repo.wlock():
1616 1618 if self._repo.dirstate[dest] in '?':
1617 1619 self._repo.dirstate.add(dest)
1618 1620 elif self._repo.dirstate[dest] in 'r':
1619 1621 self._repo.dirstate.normallookup(dest)
1620 1622 self._repo.dirstate.copy(source, dest)
1621 1623
1622 1624 def match(self, pats=None, include=None, exclude=None, default='glob',
1623 1625 listsubrepos=False, badfn=None):
1624 1626 r = self._repo
1625 1627
1626 1628 # Only a case insensitive filesystem needs magic to translate user input
1627 1629 # to actual case in the filesystem.
1628 1630 icasefs = not util.fscasesensitive(r.root)
1629 1631 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1630 1632 default, auditor=r.auditor, ctx=self,
1631 1633 listsubrepos=listsubrepos, badfn=badfn,
1632 1634 icasefs=icasefs)
1633 1635
1634 1636 def flushall(self):
1635 1637 pass # For overlayworkingfilectx compatibility.
1636 1638
1637 1639 def _filtersuspectsymlink(self, files):
1638 1640 if not files or self._repo.dirstate._checklink:
1639 1641 return files
1640 1642
1641 1643 # Symlink placeholders may get non-symlink-like contents
1642 1644 # via user error or dereferencing by NFS or Samba servers,
1643 1645 # so we filter out any placeholders that don't look like a
1644 1646 # symlink
1645 1647 sane = []
1646 1648 for f in files:
1647 1649 if self.flags(f) == 'l':
1648 1650 d = self[f].data()
1649 1651 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1650 1652 self._repo.ui.debug('ignoring suspect symlink placeholder'
1651 1653 ' "%s"\n' % f)
1652 1654 continue
1653 1655 sane.append(f)
1654 1656 return sane
1655 1657
1656 1658 def _checklookup(self, files):
1657 1659 # check for any possibly clean files
1658 1660 if not files:
1659 1661 return [], [], []
1660 1662
1661 1663 modified = []
1662 1664 deleted = []
1663 1665 fixup = []
1664 1666 pctx = self._parents[0]
1665 1667 # do a full compare of any files that might have changed
1666 1668 for f in sorted(files):
1667 1669 try:
1668 1670 # This will return True for a file that got replaced by a
1669 1671 # directory in the interim, but fixing that is pretty hard.
1670 1672 if (f not in pctx or self.flags(f) != pctx.flags(f)
1671 1673 or pctx[f].cmp(self[f])):
1672 1674 modified.append(f)
1673 1675 else:
1674 1676 fixup.append(f)
1675 1677 except (IOError, OSError):
1676 1678 # A file become inaccessible in between? Mark it as deleted,
1677 1679 # matching dirstate behavior (issue5584).
1678 1680 # The dirstate has more complex behavior around whether a
1679 1681 # missing file matches a directory, etc, but we don't need to
1680 1682 # bother with that: if f has made it to this point, we're sure
1681 1683 # it's in the dirstate.
1682 1684 deleted.append(f)
1683 1685
1684 1686 return modified, deleted, fixup
1685 1687
1686 1688 def _poststatusfixup(self, status, fixup):
1687 1689 """update dirstate for files that are actually clean"""
1688 1690 poststatus = self._repo.postdsstatus()
1689 1691 if fixup or poststatus:
1690 1692 try:
1691 1693 oldid = self._repo.dirstate.identity()
1692 1694
1693 1695 # updating the dirstate is optional
1694 1696 # so we don't wait on the lock
1695 1697 # wlock can invalidate the dirstate, so cache normal _after_
1696 1698 # taking the lock
1697 1699 with self._repo.wlock(False):
1698 1700 if self._repo.dirstate.identity() == oldid:
1699 1701 if fixup:
1700 1702 normal = self._repo.dirstate.normal
1701 1703 for f in fixup:
1702 1704 normal(f)
1703 1705 # write changes out explicitly, because nesting
1704 1706 # wlock at runtime may prevent 'wlock.release()'
1705 1707 # after this block from doing so for subsequent
1706 1708 # changing files
1707 1709 tr = self._repo.currenttransaction()
1708 1710 self._repo.dirstate.write(tr)
1709 1711
1710 1712 if poststatus:
1711 1713 for ps in poststatus:
1712 1714 ps(self, status)
1713 1715 else:
1714 1716 # in this case, writing changes out breaks
1715 1717 # consistency, because .hg/dirstate was
1716 1718 # already changed simultaneously after last
1717 1719 # caching (see also issue5584 for detail)
1718 1720 self._repo.ui.debug('skip updating dirstate: '
1719 1721 'identity mismatch\n')
1720 1722 except error.LockError:
1721 1723 pass
1722 1724 finally:
1723 1725 # Even if the wlock couldn't be grabbed, clear out the list.
1724 1726 self._repo.clearpostdsstatus()
1725 1727
1726 1728 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1727 1729 '''Gets the status from the dirstate -- internal use only.'''
1728 1730 subrepos = []
1729 1731 if '.hgsub' in self:
1730 1732 subrepos = sorted(self.substate)
1731 1733 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1732 1734 clean=clean, unknown=unknown)
1733 1735
1734 1736 # check for any possibly clean files
1735 1737 fixup = []
1736 1738 if cmp:
1737 1739 modified2, deleted2, fixup = self._checklookup(cmp)
1738 1740 s.modified.extend(modified2)
1739 1741 s.deleted.extend(deleted2)
1740 1742
1741 1743 if fixup and clean:
1742 1744 s.clean.extend(fixup)
1743 1745
1744 1746 self._poststatusfixup(s, fixup)
1745 1747
1746 1748 if match.always():
1747 1749 # cache for performance
1748 1750 if s.unknown or s.ignored or s.clean:
1749 1751 # "_status" is cached with list*=False in the normal route
1750 1752 self._status = scmutil.status(s.modified, s.added, s.removed,
1751 1753 s.deleted, [], [], [])
1752 1754 else:
1753 1755 self._status = s
1754 1756
1755 1757 return s
1756 1758
1757 1759 @propertycache
1758 1760 def _manifest(self):
1759 1761 """generate a manifest corresponding to the values in self._status
1760 1762
1761 1763 This reuse the file nodeid from parent, but we use special node
1762 1764 identifiers for added and modified files. This is used by manifests
1763 1765 merge to see that files are different and by update logic to avoid
1764 1766 deleting newly added files.
1765 1767 """
1766 1768 return self._buildstatusmanifest(self._status)
1767 1769
1768 1770 def _buildstatusmanifest(self, status):
1769 1771 """Builds a manifest that includes the given status results."""
1770 1772 parents = self.parents()
1771 1773
1772 1774 man = parents[0].manifest().copy()
1773 1775
1774 1776 ff = self._flagfunc
1775 1777 for i, l in ((addednodeid, status.added),
1776 1778 (modifiednodeid, status.modified)):
1777 1779 for f in l:
1778 1780 man[f] = i
1779 1781 try:
1780 1782 man.setflag(f, ff(f))
1781 1783 except OSError:
1782 1784 pass
1783 1785
1784 1786 for f in status.deleted + status.removed:
1785 1787 if f in man:
1786 1788 del man[f]
1787 1789
1788 1790 return man
1789 1791
1790 1792 def _buildstatus(self, other, s, match, listignored, listclean,
1791 1793 listunknown):
1792 1794 """build a status with respect to another context
1793 1795
1794 1796 This includes logic for maintaining the fast path of status when
1795 1797 comparing the working directory against its parent, which is to skip
1796 1798 building a new manifest if self (working directory) is not comparing
1797 1799 against its parent (repo['.']).
1798 1800 """
1799 1801 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1800 1802 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1801 1803 # might have accidentally ended up with the entire contents of the file
1802 1804 # they are supposed to be linking to.
1803 1805 s.modified[:] = self._filtersuspectsymlink(s.modified)
1804 1806 if other != self._repo['.']:
1805 1807 s = super(workingctx, self)._buildstatus(other, s, match,
1806 1808 listignored, listclean,
1807 1809 listunknown)
1808 1810 return s
1809 1811
1810 1812 def _matchstatus(self, other, match):
1811 1813 """override the match method with a filter for directory patterns
1812 1814
1813 1815 We use inheritance to customize the match.bad method only in cases of
1814 1816 workingctx since it belongs only to the working directory when
1815 1817 comparing against the parent changeset.
1816 1818
1817 1819 If we aren't comparing against the working directory's parent, then we
1818 1820 just use the default match object sent to us.
1819 1821 """
1820 1822 if other != self._repo['.']:
1821 1823 def bad(f, msg):
1822 1824 # 'f' may be a directory pattern from 'match.files()',
1823 1825 # so 'f not in ctx1' is not enough
1824 1826 if f not in other and not other.hasdir(f):
1825 1827 self._repo.ui.warn('%s: %s\n' %
1826 1828 (self._repo.dirstate.pathto(f), msg))
1827 1829 match.bad = bad
1828 1830 return match
1829 1831
    def markcommitted(self, node):
        # run the normal post-commit bookkeeping, then let the sparse
        # machinery refresh its state for the newly committed node
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)
1834 1836
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        # filelog and ctx are optional; when omitted they are expected to
        # be filled in lazily by propertycaches on the subclasses
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            # nullid signals "not present in this context's manifest"
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # renamed is (sourcepath, sourcenode); append filelog=None so
            # _parentfilectx looks up the copy source's own filelog
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # skip parents in which the file does not exist (node == nullid)
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # an uncommitted file context has no committed descendants
        return []
1881 1883
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily bind to a fresh working-directory context
        return workingctx(self._repo)

    def data(self):
        """return the on-disk content of the file"""
        return self._repo.wread(self._path)
    def renamed(self):
        """return (source, p1-filenode-of-source) or None if not copied"""
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        """return the on-disk mtime, falling back to the changeset date
        if the file is missing"""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        # like exists(), but does not follow a trailing symlink
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        # raises if the path is unsafe (escapes the repo, nested repo, ...)
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        # only meaningful for tracked files (normal/merged/added states)
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if wvfs.isdir(f) and not wvfs.islink(f):
            # a real directory is squatting on our file name
            wvfs.rmtree(f, forcibly=True)
        for p in reversed(list(util.finddirs(f))):
            # remove the first ancestor that turns out to be a file or a
            # symlink, so the directory hierarchy can be (re)created
            if wvfs.isfileorlink(p):
                wvfs.unlink(p)
                break

    def setflags(self, l, x):
        """set the symlink (l) and executable (x) flags on disk"""
        self._repo.wvfs.setflags(self._path, l, x)
1959 1961
class overlayworkingctx(workingctx):
    """Wraps another mutable context with a write-back cache that can be
    flushed at a later time.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo, wrappedctx):
        super(overlayworkingctx, self).__init__(repo)
        self._repo = repo
        self._wrappedctx = wrappedctx
        self._clean()

    def data(self, path):
        """return file content, preferring dirty in-memory data

        Raises ProgrammingError if the cache says the file was deleted.
        """
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                # fix: interpolate 'path' (the argument); 'self._path' does
                # not exist on a context object and raised AttributeError
                # instead of the intended error
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    def isinmemory(self):
        # this context never touches the on-disk working copy
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                # fix: 'path', not the nonexistent 'self._path' (see data())
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def write(self, path, data, flags=''):
        """record new content (and flags) for path in the cache"""
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._markdirty(path, exists=True, data=data, date=util.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        """record new link/exec flags for path without touching its data"""
        self._markdirty(path, exists=True, date=util.makedate(),
                        flags=(l and 'l' or '') + (x and 'x' or ''))

    def remove(self, path):
        """record a deletion of path in the cache"""
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                        'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']
        return self._wrappedctx[path].exists()

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']
        return self._wrappedctx[path].lexists()

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                # fix: 'path', not the nonexistent 'self._path' (see data())
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def flushall(self):
        """replay every cached write/removal onto the wrapped context,
        in the order the paths were first dirtied, then clear the cache"""
        for path in self._writeorder:
            entry = self._cache[path]
            if entry['exists']:
                self._wrappedctx[path].clearunknown()
                if entry['data'] is not None:
                    if entry['flags'] is None:
                        raise error.ProgrammingError('data set but not flags')
                    self._wrappedctx[path].write(
                        entry['data'],
                        entry['flags'])
                else:
                    # flags-only change: data was never cached
                    self._wrappedctx[path].setflags(
                        'l' in entry['flags'],
                        'x' in entry['flags'])
            else:
                # NOTE(review): 'path' lands in remove()'s 'ignoremissing'
                # parameter, making the removal best-effort -- confirm this
                # is intentional
                self._wrappedctx[path].remove(path)
        self._clean()

    def isdirty(self, path):
        """True if path has a pending cached modification"""
        return path in self._cache

    def _clean(self):
        # _cache holds the per-path entries; _writeorder remembers the
        # first-dirty order so flushall() is deterministic
        self._cache = {}
        self._writeorder = []

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        if path not in self._cache:
            self._writeorder.append(path)

        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2095 2097
class overlayworkingfilectx(workingfilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        # parent is the owning overlayworkingctx; all reads and writes are
        # delegated to it
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # True if the contents differ
        return self.data() != fctx.data()

    def ctx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        # NOTE(review): delegates to the parent's exists(), which follows
        # symlinks -- so lexists() here is not strictly "l"; confirm
        return self._parent.exists(self._path)

    def renamed(self):
        # Copies are currently tracked in the dirstate as before. Straight copy
        # from workingfilectx.
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._parent.size(self._path)

    def audit(self):
        # in-memory paths never touch the filesystem, so nothing to audit
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False):
        # backgroundclose is accepted for interface compatibility but is
        # meaningless for an in-memory write
        return self._parent.write(self._path, data, flags)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)
2150 2152
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # deliberately start the super() lookup at workingctx so that
        # workingctx.__init__ is skipped and committablectx.__init__
        # receives the precomputed 'changes' status
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.

        The ignored/unknown flags are accepted for interface compatibility;
        those categories are always reported empty here.
        """
        if clean:
            # everything tracked that is not part of this commit is "clean"
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2186 2188
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        try:
            return cache[path]
        except KeyError:
            result = func(repo, memctx, path)
            cache[path] = result
            return result

    return getfilectx
2202 2204
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # renamed() yields (source, node); only the source path is kept.
        # This is weird, but apparently only one parent is ever tracked.
        copysource = None
        renamed = fctx.renamed()
        if renamed:
            copysource = renamed[0]
        return memfilectx(repo, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copysource, memctx=memctx)

    return getfilectx
2221 2223
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        # a file deleted by the patch is signalled with data=None
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data,
                          islink=islink,
                          isexec=isexec,
                          copied=copied,
                          memctx=memctx)

    return getfilectx
2237 2239
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # missing (None) parents are normalized to the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # added files have no filelog parents
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # filectxfn returned a file context: modified
                modified.append(f)
            else:
                # filectxfn returned None: the file was removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2360 2362
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        if copied:
            self._copied = (copied, nullid)
        else:
            self._copied = None

    def data(self):
        """return the in-memory file content"""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
2393 2395
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            # no ctx override: any ctx trivially "matches"
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr_ in attrs:
                if util.safehasattr(originalfctx, attr_):
                    setattr(self, attr_, getattr(originalfctx, attr_))

    def data(self):
        # lazily evaluated: either the override or originalfctx.data
        return self._datafunc()
2464 2466
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reuisng
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        # pad with null contexts so there are always exactly two parents
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): 'p1 != nullid' compares a changectx against a bytes
        # node id, which looks always-true; presumably 'p1.node() != nullid'
        # was intended -- confirm
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # file data comes straight from the original revision
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2562 2564
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # repo is optional because contrib/simplemerge uses this class
        self._path = path
        self._repo = repo

    def cmp(self, fctx):
        """return True if this file differs from fctx"""
        # filecmp follows symlinks whereas `cmp` should not; only take the
        # fast path when neither side is a symlink
        eitherlink = 'l' in self.flags() or 'l' in fctx.flags()
        if not eitherlink and self._repo and isinstance(fctx, workingfilectx):
            # Fast path for merge when both sides are disk-backed. Note
            # that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            other = self._repo.wjoin(fctx.path())
            return not filecmp.cmp(self.path(), other)
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # arbitrary files carry no link/exec flags
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags):
        assert not flags
        with open(self._path, "w") as f:
            f.write(data)
@@ -1,276 +1,272 b''
1 1 #require killdaemons
2 2
3 3 $ hg clone http://localhost:$HGPORT/ copy
4 4 abort: * (glob)
5 5 [255]
6 6 $ test -d copy
7 7 [1]
8 8
9 9 This server doesn't do range requests so it's basically only good for
10 10 one pull
11 11
12 12 $ $PYTHON "$TESTDIR/dumbhttp.py" -p $HGPORT --pid dumb.pid \
13 13 > --logfile server.log
14 14 $ cat dumb.pid >> $DAEMON_PIDS
15 15 $ hg init remote
16 16 $ cd remote
17 17 $ echo foo > bar
18 18 $ echo c2 > '.dotfile with spaces'
19 19 $ hg add
20 20 adding .dotfile with spaces
21 21 adding bar
22 22 $ hg commit -m"test"
23 23 $ hg tip
24 24 changeset: 0:02770d679fb8
25 25 tag: tip
26 26 user: test
27 27 date: Thu Jan 01 00:00:00 1970 +0000
28 28 summary: test
29 29
30 30 $ cd ..
31 31 $ hg clone static-http://localhost:$HGPORT/remote local
32 32 requesting all changes
33 33 adding changesets
34 34 adding manifests
35 35 adding file changes
36 36 added 1 changesets with 2 changes to 2 files
37 37 new changesets 02770d679fb8
38 38 updating to branch default
39 39 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
40 40 $ cd local
41 41 $ hg verify
42 42 checking changesets
43 43 checking manifests
44 44 crosschecking files in changesets and manifests
45 45 checking files
46 46 2 files, 1 changesets, 2 total revisions
47 47 $ cat bar
48 48 foo
49 49 $ cd ../remote
50 50 $ echo baz > quux
51 51 $ hg commit -A -mtest2
52 52 adding quux
53 53
54 54 check for HTTP opener failures when cachefile does not exist
55 55
56 56 $ rm .hg/cache/*
57 57 $ cd ../local
58 58 $ cat >> .hg/hgrc <<EOF
59 59 > [hooks]
60 60 > changegroup = sh -c "printenv.py changegroup"
61 61 > EOF
62 62 $ hg pull
63 63 pulling from static-http://localhost:$HGPORT/remote
64 64 searching for changes
65 65 adding changesets
66 66 adding manifests
67 67 adding file changes
68 68 added 1 changesets with 1 changes to 1 files
69 69 new changesets 4ac2e3648604
70 70 changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=4ac2e3648604439c580c69b09ec9d93a88d93432 HG_NODE_LAST=4ac2e3648604439c580c69b09ec9d93a88d93432 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=http://localhost:$HGPORT/remote
71 71 (run 'hg update' to get a working copy)
72 72
73 73 trying to push
74 74
75 75 $ hg update
76 76 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
77 77 $ echo more foo >> bar
78 78 $ hg commit -m"test"
79 79 $ hg push
80 80 pushing to static-http://localhost:$HGPORT/remote
81 81 abort: destination does not support push
82 82 [255]
83 83
84 84 trying clone -r
85 85
86 86 $ cd ..
87 87 $ hg clone -r doesnotexist static-http://localhost:$HGPORT/remote local0
88 88 abort: unknown revision 'doesnotexist'!
89 89 [255]
90 90 $ hg clone -r 0 static-http://localhost:$HGPORT/remote local0
91 91 adding changesets
92 92 adding manifests
93 93 adding file changes
94 94 added 1 changesets with 2 changes to 2 files
95 95 new changesets 02770d679fb8
96 96 updating to branch default
97 97 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
98 98
99 99 test with "/" URI (issue747) and subrepo
100 100
101 101 $ hg init
102 102 $ hg init sub
103 103 $ touch sub/test
104 104 $ hg -R sub commit -A -m "test"
105 105 adding test
106 106 $ hg -R sub tag not-empty
107 107 $ echo sub=sub > .hgsub
108 108 $ echo a > a
109 109 $ hg add a .hgsub
110 110 $ hg -q ci -ma
111 111 $ hg clone static-http://localhost:$HGPORT/ local2
112 112 requesting all changes
113 113 adding changesets
114 114 adding manifests
115 115 adding file changes
116 116 added 1 changesets with 3 changes to 3 files
117 117 new changesets a9ebfbe8e587
118 118 updating to branch default
119 119 cloning subrepo sub from static-http://localhost:$HGPORT/sub
120 120 requesting all changes
121 121 adding changesets
122 122 adding manifests
123 123 adding file changes
124 124 added 2 changesets with 2 changes to 2 files
125 125 new changesets be090ea66256:322ea90975df
126 126 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
127 127 $ cd local2
128 128 $ hg verify
129 129 checking changesets
130 130 checking manifests
131 131 crosschecking files in changesets and manifests
132 132 checking files
133 133 3 files, 1 changesets, 3 total revisions
134 134 checking subrepo links
135 135 $ cat a
136 136 a
137 137 $ hg paths
138 138 default = static-http://localhost:$HGPORT/
139 139
140 140 test with empty repo (issue965)
141 141
142 142 $ cd ..
143 143 $ hg init remotempty
144 144 $ hg clone static-http://localhost:$HGPORT/remotempty local3
145 145 no changes found
146 146 updating to branch default
147 147 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
148 148 $ cd local3
149 149 $ hg verify
150 150 checking changesets
151 151 checking manifests
152 152 crosschecking files in changesets and manifests
153 153 checking files
154 154 0 files, 0 changesets, 0 total revisions
155 155 $ hg paths
156 156 default = static-http://localhost:$HGPORT/remotempty
157 157
158 158 test with non-repo
159 159
160 160 $ cd ..
161 161 $ mkdir notarepo
162 162 $ hg clone static-http://localhost:$HGPORT/notarepo local3
163 163 abort: 'http://localhost:$HGPORT/notarepo' does not appear to be an hg repository!
164 164 [255]
165 165
166 166 Clone with tags and branches works
167 167
168 168 $ hg init remote-with-names
169 169 $ cd remote-with-names
170 170 $ echo 0 > foo
171 171 $ hg -q commit -A -m initial
172 172 $ echo 1 > foo
173 173 $ hg commit -m 'commit 1'
174 174 $ hg -q up 0
175 175 $ hg branch mybranch
176 176 marked working directory as branch mybranch
177 177 (branches are permanent and global, did you want a bookmark?)
178 178 $ echo 2 > foo
179 179 $ hg commit -m 'commit 2 (mybranch)'
180 180 $ hg tag -r 1 'default-tag'
181 181 $ hg tag -r 2 'branch-tag'
182 182
183 183 $ cd ..
184 184
185 185 $ hg clone static-http://localhost:$HGPORT/remote-with-names local-with-names
186 186 requesting all changes
187 187 adding changesets
188 188 adding manifests
189 189 adding file changes
190 190 added 5 changesets with 5 changes to 2 files (+1 heads)
191 191 new changesets 68986213bd44:0c325bd2b5a7
192 192 updating to branch default
193 193 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
194 194
195 195 Clone a specific branch works
196 196
197 197 $ hg clone -r mybranch static-http://localhost:$HGPORT/remote-with-names local-with-names-branch
198 198 adding changesets
199 199 adding manifests
200 200 adding file changes
201 201 added 4 changesets with 4 changes to 2 files
202 202 new changesets 68986213bd44:0c325bd2b5a7
203 203 updating to branch mybranch
204 204 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
205 205
206 206 Clone a specific tag works
207 207
208 208 $ hg clone -r default-tag static-http://localhost:$HGPORT/remote-with-names local-with-names-tag
209 209 adding changesets
210 210 adding manifests
211 211 adding file changes
212 212 added 2 changesets with 2 changes to 1 files
213 213 new changesets 68986213bd44:4ee3fcef1c80
214 214 updating to branch default
215 215 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
216 216
217 217 $ killdaemons.py
218 218
219 219 List of files accessed over HTTP:
220 220
221 221 $ cat server.log | sed -n -e 's|.*GET \(/[^ ]*\).*|\1|p' | sort -u
222 222 /.hg/bookmarks
223 223 /.hg/bookmarks.current
224 224 /.hg/cache/hgtagsfnodes1
225 /.hg/dirstate
226 225 /.hg/requires
227 226 /.hg/store/00changelog.i
228 227 /.hg/store/00manifest.i
229 228 /.hg/store/data/%7E2ehgsub.i
230 229 /.hg/store/data/%7E2ehgsubstate.i
231 230 /.hg/store/data/a.i
232 231 /notarepo/.hg/00changelog.i
233 232 /notarepo/.hg/requires
234 233 /remote-with-names/.hg/bookmarks
235 234 /remote-with-names/.hg/bookmarks.current
236 235 /remote-with-names/.hg/cache/branch2-served
237 236 /remote-with-names/.hg/cache/hgtagsfnodes1
238 237 /remote-with-names/.hg/cache/tags2-served
239 /remote-with-names/.hg/dirstate
240 238 /remote-with-names/.hg/localtags
241 239 /remote-with-names/.hg/requires
242 240 /remote-with-names/.hg/store/00changelog.i
243 241 /remote-with-names/.hg/store/00manifest.i
244 242 /remote-with-names/.hg/store/data/%7E2ehgtags.i
245 243 /remote-with-names/.hg/store/data/foo.i
246 244 /remote/.hg/bookmarks
247 245 /remote/.hg/bookmarks.current
248 246 /remote/.hg/cache/branch2-base
249 247 /remote/.hg/cache/branch2-immutable
250 248 /remote/.hg/cache/branch2-served
251 249 /remote/.hg/cache/hgtagsfnodes1
252 250 /remote/.hg/cache/rbc-names-v1
253 251 /remote/.hg/cache/tags2-served
254 /remote/.hg/dirstate
255 252 /remote/.hg/localtags
256 253 /remote/.hg/requires
257 254 /remote/.hg/store/00changelog.i
258 255 /remote/.hg/store/00manifest.i
259 256 /remote/.hg/store/data/%7E2edotfile%20with%20spaces.i
260 257 /remote/.hg/store/data/%7E2ehgtags.i
261 258 /remote/.hg/store/data/bar.i
262 259 /remote/.hg/store/data/quux.i
263 260 /remotempty/.hg/bookmarks
264 261 /remotempty/.hg/bookmarks.current
265 262 /remotempty/.hg/requires
266 263 /remotempty/.hg/store/00changelog.i
267 264 /remotempty/.hg/store/00manifest.i
268 265 /sub/.hg/bookmarks
269 266 /sub/.hg/bookmarks.current
270 267 /sub/.hg/cache/hgtagsfnodes1
271 /sub/.hg/dirstate
272 268 /sub/.hg/requires
273 269 /sub/.hg/store/00changelog.i
274 270 /sub/.hg/store/00manifest.i
275 271 /sub/.hg/store/data/%7E2ehgtags.i
276 272 /sub/.hg/store/data/test.i
General Comments 0
You need to be logged in to leave comments. Login now