filectx: add isabsent method...
Siddharth Agarwal
r26978:9b9d4bcc default
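
This changeset adds an isabsent() method to basefilectx: the base implementation always returns False, and its docstring notes it exists mainly so merge code can detect change/delete conflicts. A minimal sketch of how a caller might consult it (the helper name and two-filectx signature are illustrative assumptions, not part of this commit):

    def ischangedelete(localfctx, otherfctx):
        # Hypothetical helper: a change/delete conflict exists when exactly
        # one side of the merge reports its file as absent.
        return localfctx.isabsent() != otherfctx.isabsent()
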
@@ -1,1937 +1,1944 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9
10 10 from node import nullid, nullrev, wdirid, short, hex, bin
11 11 from i18n import _
12 12 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
13 13 import match as matchmod
14 14 import os, errno, stat
15 15 import obsolete as obsmod
16 16 import repoview
17 17 import fileset
18 18 import revlog
19 19
20 20 propertycache = util.propertycache
21 21
22 22 # Phony node value to stand in for new files in some uses of
23 23 # manifests. Manifests support 21-byte hashes for nodes which are
24 24 # dirty in the working copy.
25 25 _newnode = '!' * 21
26 26
27 27 nonascii = re.compile(r'[^\x21-\x7f]').search
28 28
29 29 class basectx(object):
30 30 """A basectx object represents the common logic for its children:
31 31 changectx: read-only context that is already present in the repo,
32 32 workingctx: a context that represents the working directory and can
33 33 be committed,
34 34 memctx: a context that represents changes in-memory and can also
35 35 be committed."""
36 36 def __new__(cls, repo, changeid='', *args, **kwargs):
37 37 if isinstance(changeid, basectx):
38 38 return changeid
39 39
40 40 o = super(basectx, cls).__new__(cls)
41 41
42 42 o._repo = repo
43 43 o._rev = nullrev
44 44 o._node = nullid
45 45
46 46 return o
47 47
48 48 def __str__(self):
49 49 return short(self.node())
50 50
51 51 def __int__(self):
52 52 return self.rev()
53 53
54 54 def __repr__(self):
55 55 return "<%s %s>" % (type(self).__name__, str(self))
56 56
57 57 def __eq__(self, other):
58 58 try:
59 59 return type(self) == type(other) and self._rev == other._rev
60 60 except AttributeError:
61 61 return False
62 62
63 63 def __ne__(self, other):
64 64 return not (self == other)
65 65
66 66 def __contains__(self, key):
67 67 return key in self._manifest
68 68
69 69 def __getitem__(self, key):
70 70 return self.filectx(key)
71 71
72 72 def __iter__(self):
73 73 return iter(self._manifest)
74 74
75 75 def _manifestmatches(self, match, s):
76 76 """generate a new manifest filtered by the match argument
77 77
78 78 This method is for internal use only and mainly exists to provide an
79 79 object oriented way for other contexts to customize the manifest
80 80 generation.
81 81 """
82 82 return self.manifest().matches(match)
83 83
84 84 def _matchstatus(self, other, match):
85 85 """return match.always if match is None
86 86
87 87 This internal method provides a way for child objects to override the
88 88 match operator.
89 89 """
90 90 return match or matchmod.always(self._repo.root, self._repo.getcwd())
91 91
92 92 def _buildstatus(self, other, s, match, listignored, listclean,
93 93 listunknown):
94 94 """build a status with respect to another context"""
95 95 # Load earliest manifest first for caching reasons. More specifically,
96 96 # if you have revisions 1000 and 1001, 1001 is probably stored as a
97 97 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
98 98 # 1000 and cache it so that when you read 1001, we just need to apply a
99 99 # delta to what's in the cache. So that's one full reconstruction + one
100 100 # delta application.
101 101 if self.rev() is not None and self.rev() < other.rev():
102 102 self.manifest()
103 103 mf1 = other._manifestmatches(match, s)
104 104 mf2 = self._manifestmatches(match, s)
105 105
106 106 modified, added = [], []
107 107 removed = []
108 108 clean = []
109 109 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
110 110 deletedset = set(deleted)
111 111 d = mf1.diff(mf2, clean=listclean)
112 112 for fn, value in d.iteritems():
113 113 if fn in deletedset:
114 114 continue
115 115 if value is None:
116 116 clean.append(fn)
117 117 continue
118 118 (node1, flag1), (node2, flag2) = value
119 119 if node1 is None:
120 120 added.append(fn)
121 121 elif node2 is None:
122 122 removed.append(fn)
123 123 elif node2 != _newnode:
124 124 # The file was not a new file in mf2, so an entry
125 125 # from diff is really a difference.
126 126 modified.append(fn)
127 127 elif self[fn].cmp(other[fn]):
128 128 # node2 was newnode, but the working file doesn't
129 129 # match the one in mf1.
130 130 modified.append(fn)
131 131 else:
132 132 clean.append(fn)
133 133
134 134 if removed:
135 135 # need to filter files if they are already reported as removed
136 136 unknown = [fn for fn in unknown if fn not in mf1]
137 137 ignored = [fn for fn in ignored if fn not in mf1]
138 138 # if they're deleted, don't report them as removed
139 139 removed = [fn for fn in removed if fn not in deletedset]
140 140
141 141 return scmutil.status(modified, added, removed, deleted, unknown,
142 142 ignored, clean)
143 143
144 144 @propertycache
145 145 def substate(self):
146 146 return subrepo.state(self, self._repo.ui)
147 147
148 148 def subrev(self, subpath):
149 149 return self.substate[subpath][1]
150 150
151 151 def rev(self):
152 152 return self._rev
153 153 def node(self):
154 154 return self._node
155 155 def hex(self):
156 156 return hex(self.node())
157 157 def manifest(self):
158 158 return self._manifest
159 159 def repo(self):
160 160 return self._repo
161 161 def phasestr(self):
162 162 return phases.phasenames[self.phase()]
163 163 def mutable(self):
164 164 return self.phase() > phases.public
165 165
166 166 def getfileset(self, expr):
167 167 return fileset.getfileset(self, expr)
168 168
169 169 def obsolete(self):
170 170 """True if the changeset is obsolete"""
171 171 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
172 172
173 173 def extinct(self):
174 174 """True if the changeset is extinct"""
175 175 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
176 176
177 177 def unstable(self):
178 178 """True if the changeset is not obsolete but its ancestors are"""
179 179 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
180 180
181 181 def bumped(self):
182 182 """True if the changeset tries to be a successor of a public changeset
183 183
184 184 Only non-public and non-obsolete changesets may be bumped.
185 185 """
186 186 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
187 187
188 188 def divergent(self):
189 189 """Is a successor of a changeset with multiple possible successor sets
190 190
191 191 Only non-public and non-obsolete changesets may be divergent.
192 192 """
193 193 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
194 194
195 195 def troubled(self):
196 196 """True if the changeset is either unstable, bumped or divergent"""
197 197 return self.unstable() or self.bumped() or self.divergent()
198 198
199 199 def troubles(self):
200 200 """return the list of troubles affecting this changeset.
201 201
202 202 Troubles are returned as strings. Possible values are:
203 203 - unstable,
204 204 - bumped,
205 205 - divergent.
206 206 """
207 207 troubles = []
208 208 if self.unstable():
209 209 troubles.append('unstable')
210 210 if self.bumped():
211 211 troubles.append('bumped')
212 212 if self.divergent():
213 213 troubles.append('divergent')
214 214 return troubles
215 215
216 216 def parents(self):
217 217 """return contexts for each parent changeset"""
218 218 return self._parents
219 219
220 220 def p1(self):
221 221 return self._parents[0]
222 222
223 223 def p2(self):
224 224 if len(self._parents) == 2:
225 225 return self._parents[1]
226 226 return changectx(self._repo, -1)
227 227
228 228 def _fileinfo(self, path):
229 229 if '_manifest' in self.__dict__:
230 230 try:
231 231 return self._manifest[path], self._manifest.flags(path)
232 232 except KeyError:
233 233 raise error.ManifestLookupError(self._node, path,
234 234 _('not found in manifest'))
235 235 if '_manifestdelta' in self.__dict__ or path in self.files():
236 236 if path in self._manifestdelta:
237 237 return (self._manifestdelta[path],
238 238 self._manifestdelta.flags(path))
239 239 node, flag = self._repo.manifest.find(self._changeset[0], path)
240 240 if not node:
241 241 raise error.ManifestLookupError(self._node, path,
242 242 _('not found in manifest'))
243 243
244 244 return node, flag
245 245
246 246 def filenode(self, path):
247 247 return self._fileinfo(path)[0]
248 248
249 249 def flags(self, path):
250 250 try:
251 251 return self._fileinfo(path)[1]
252 252 except error.LookupError:
253 253 return ''
254 254
255 255 def sub(self, path):
256 256 '''return a subrepo for the stored revision of path, never wdir()'''
257 257 return subrepo.subrepo(self, path)
258 258
259 259 def nullsub(self, path, pctx):
260 260 return subrepo.nullsubrepo(self, path, pctx)
261 261
262 262 def workingsub(self, path):
263 263 '''return a subrepo for the stored revision, or wdir if this is a wdir
264 264 context.
265 265 '''
266 266 return subrepo.subrepo(self, path, allowwdir=True)
267 267
268 268 def match(self, pats=[], include=None, exclude=None, default='glob',
269 269 listsubrepos=False, badfn=None):
270 270 r = self._repo
271 271 return matchmod.match(r.root, r.getcwd(), pats,
272 272 include, exclude, default,
273 273 auditor=r.auditor, ctx=self,
274 274 listsubrepos=listsubrepos, badfn=badfn)
275 275
276 276 def diff(self, ctx2=None, match=None, **opts):
277 277 """Returns a diff generator for the given contexts and matcher"""
278 278 if ctx2 is None:
279 279 ctx2 = self.p1()
280 280 if ctx2 is not None:
281 281 ctx2 = self._repo[ctx2]
282 282 diffopts = patch.diffopts(self._repo.ui, opts)
283 283 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
284 284
285 285 def dirs(self):
286 286 return self._manifest.dirs()
287 287
288 288 def hasdir(self, dir):
289 289 return self._manifest.hasdir(dir)
290 290
291 291 def dirty(self, missing=False, merge=True, branch=True):
292 292 return False
293 293
294 294 def status(self, other=None, match=None, listignored=False,
295 295 listclean=False, listunknown=False, listsubrepos=False):
296 296 """return status of files between two nodes or node and working
297 297 directory.
298 298
299 299 If other is None, compare this node with working directory.
300 300
301 301 returns (modified, added, removed, deleted, unknown, ignored, clean)
302 302 """
303 303
304 304 ctx1 = self
305 305 ctx2 = self._repo[other]
306 306
307 307 # This next code block is, admittedly, fragile logic that tests for
308 308 # reversing the contexts and wouldn't need to exist if it weren't for
309 309 # the fast (and common) code path of comparing the working directory
310 310 # with its first parent.
311 311 #
312 312 # What we're aiming for here is the ability to call:
313 313 #
314 314 # workingctx.status(parentctx)
315 315 #
316 316 # If we always built the manifest for each context and compared those,
317 317 # then we'd be done. But the special case of the above call means we
318 318 # just copy the manifest of the parent.
319 319 reversed = False
320 320 if (not isinstance(ctx1, changectx)
321 321 and isinstance(ctx2, changectx)):
322 322 reversed = True
323 323 ctx1, ctx2 = ctx2, ctx1
324 324
325 325 match = ctx2._matchstatus(ctx1, match)
326 326 r = scmutil.status([], [], [], [], [], [], [])
327 327 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
328 328 listunknown)
329 329
330 330 if reversed:
331 331 # Reverse added and removed. Clear deleted, unknown and ignored as
332 332 # these make no sense to reverse.
333 333 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
334 334 r.clean)
335 335
336 336 if listsubrepos:
337 337 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
338 338 rev2 = ctx2.subrev(subpath)
339 339 try:
340 340 submatch = matchmod.narrowmatcher(subpath, match)
341 341 s = sub.status(rev2, match=submatch, ignored=listignored,
342 342 clean=listclean, unknown=listunknown,
343 343 listsubrepos=True)
344 344 for rfiles, sfiles in zip(r, s):
345 345 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
346 346 except error.LookupError:
347 347 self._repo.ui.status(_("skipping missing "
348 348 "subrepository: %s\n") % subpath)
349 349
350 350 for l in r:
351 351 l.sort()
352 352
353 353 return r
354 354
355 355
356 356 def makememctx(repo, parents, text, user, date, branch, files, store,
357 357 editor=None, extra=None):
358 358 def getfilectx(repo, memctx, path):
359 359 data, mode, copied = store.getfile(path)
360 360 if data is None:
361 361 return None
362 362 islink, isexec = mode
363 363 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
364 364 copied=copied, memctx=memctx)
365 365 if extra is None:
366 366 extra = {}
367 367 if branch:
368 368 extra['branch'] = encoding.fromlocal(branch)
369 369 ctx = memctx(repo, parents, text, files, getfilectx, user,
370 370 date, extra, editor)
371 371 return ctx
372 372
373 373 class changectx(basectx):
374 374 """A changecontext object makes access to data related to a particular
375 375 changeset convenient. It represents a read-only context already present in
376 376 the repo."""
377 377 def __init__(self, repo, changeid=''):
378 378 """changeid is a revision number, node, or tag"""
379 379
380 380 # since basectx.__new__ already took care of copying the object, we
381 381 # don't need to do anything in __init__, so we just exit here
382 382 if isinstance(changeid, basectx):
383 383 return
384 384
385 385 if changeid == '':
386 386 changeid = '.'
387 387 self._repo = repo
388 388
389 389 try:
390 390 if isinstance(changeid, int):
391 391 self._node = repo.changelog.node(changeid)
392 392 self._rev = changeid
393 393 return
394 394 if isinstance(changeid, long):
395 395 changeid = str(changeid)
396 396 if changeid == 'null':
397 397 self._node = nullid
398 398 self._rev = nullrev
399 399 return
400 400 if changeid == 'tip':
401 401 self._node = repo.changelog.tip()
402 402 self._rev = repo.changelog.rev(self._node)
403 403 return
404 404 if changeid == '.' or changeid == repo.dirstate.p1():
405 405 # this is a hack to delay/avoid loading obsmarkers
406 406 # when we know that '.' won't be hidden
407 407 self._node = repo.dirstate.p1()
408 408 self._rev = repo.unfiltered().changelog.rev(self._node)
409 409 return
410 410 if len(changeid) == 20:
411 411 try:
412 412 self._node = changeid
413 413 self._rev = repo.changelog.rev(changeid)
414 414 return
415 415 except error.FilteredRepoLookupError:
416 416 raise
417 417 except LookupError:
418 418 pass
419 419
420 420 try:
421 421 r = int(changeid)
422 422 if str(r) != changeid:
423 423 raise ValueError
424 424 l = len(repo.changelog)
425 425 if r < 0:
426 426 r += l
427 427 if r < 0 or r >= l:
428 428 raise ValueError
429 429 self._rev = r
430 430 self._node = repo.changelog.node(r)
431 431 return
432 432 except error.FilteredIndexError:
433 433 raise
434 434 except (ValueError, OverflowError, IndexError):
435 435 pass
436 436
437 437 if len(changeid) == 40:
438 438 try:
439 439 self._node = bin(changeid)
440 440 self._rev = repo.changelog.rev(self._node)
441 441 return
442 442 except error.FilteredLookupError:
443 443 raise
444 444 except (TypeError, LookupError):
445 445 pass
446 446
447 447 # lookup bookmarks through the name interface
448 448 try:
449 449 self._node = repo.names.singlenode(repo, changeid)
450 450 self._rev = repo.changelog.rev(self._node)
451 451 return
452 452 except KeyError:
453 453 pass
454 454 except error.FilteredRepoLookupError:
455 455 raise
456 456 except error.RepoLookupError:
457 457 pass
458 458
459 459 self._node = repo.unfiltered().changelog._partialmatch(changeid)
460 460 if self._node is not None:
461 461 self._rev = repo.changelog.rev(self._node)
462 462 return
463 463
464 464 # lookup failed
465 465 # check if it might have come from damaged dirstate
466 466 #
467 467 # XXX we could avoid the unfiltered if we had a recognizable
468 468 # exception for filtered changeset access
469 469 if changeid in repo.unfiltered().dirstate.parents():
470 470 msg = _("working directory has unknown parent '%s'!")
471 471 raise error.Abort(msg % short(changeid))
472 472 try:
473 473 if len(changeid) == 20 and nonascii(changeid):
474 474 changeid = hex(changeid)
475 475 except TypeError:
476 476 pass
477 477 except (error.FilteredIndexError, error.FilteredLookupError,
478 478 error.FilteredRepoLookupError):
479 479 if repo.filtername.startswith('visible'):
480 480 msg = _("hidden revision '%s'") % changeid
481 481 hint = _('use --hidden to access hidden revisions')
482 482 raise error.FilteredRepoLookupError(msg, hint=hint)
483 483 msg = _("filtered revision '%s' (not in '%s' subset)")
484 484 msg %= (changeid, repo.filtername)
485 485 raise error.FilteredRepoLookupError(msg)
486 486 except IndexError:
487 487 pass
488 488 raise error.RepoLookupError(
489 489 _("unknown revision '%s'") % changeid)
490 490
491 491 def __hash__(self):
492 492 try:
493 493 return hash(self._rev)
494 494 except AttributeError:
495 495 return id(self)
496 496
497 497 def __nonzero__(self):
498 498 return self._rev != nullrev
499 499
500 500 @propertycache
501 501 def _changeset(self):
502 502 return self._repo.changelog.read(self.rev())
503 503
504 504 @propertycache
505 505 def _manifest(self):
506 506 return self._repo.manifest.read(self._changeset[0])
507 507
508 508 @propertycache
509 509 def _manifestdelta(self):
510 510 return self._repo.manifest.readdelta(self._changeset[0])
511 511
512 512 @propertycache
513 513 def _parents(self):
514 514 p = self._repo.changelog.parentrevs(self._rev)
515 515 if p[1] == nullrev:
516 516 p = p[:-1]
517 517 return [changectx(self._repo, x) for x in p]
518 518
519 519 def changeset(self):
520 520 return self._changeset
521 521 def manifestnode(self):
522 522 return self._changeset[0]
523 523
524 524 def user(self):
525 525 return self._changeset[1]
526 526 def date(self):
527 527 return self._changeset[2]
528 528 def files(self):
529 529 return self._changeset[3]
530 530 def description(self):
531 531 return self._changeset[4]
532 532 def branch(self):
533 533 return encoding.tolocal(self._changeset[5].get("branch"))
534 534 def closesbranch(self):
535 535 return 'close' in self._changeset[5]
536 536 def extra(self):
537 537 return self._changeset[5]
538 538 def tags(self):
539 539 return self._repo.nodetags(self._node)
540 540 def bookmarks(self):
541 541 return self._repo.nodebookmarks(self._node)
542 542 def phase(self):
543 543 return self._repo._phasecache.phase(self._repo, self._rev)
544 544 def hidden(self):
545 545 return self._rev in repoview.filterrevs(self._repo, 'visible')
546 546
547 547 def children(self):
548 548 """return contexts for each child changeset"""
549 549 c = self._repo.changelog.children(self._node)
550 550 return [changectx(self._repo, x) for x in c]
551 551
552 552 def ancestors(self):
553 553 for a in self._repo.changelog.ancestors([self._rev]):
554 554 yield changectx(self._repo, a)
555 555
556 556 def descendants(self):
557 557 for d in self._repo.changelog.descendants([self._rev]):
558 558 yield changectx(self._repo, d)
559 559
560 560 def filectx(self, path, fileid=None, filelog=None):
561 561 """get a file context from this changeset"""
562 562 if fileid is None:
563 563 fileid = self.filenode(path)
564 564 return filectx(self._repo, path, fileid=fileid,
565 565 changectx=self, filelog=filelog)
566 566
567 567 def ancestor(self, c2, warn=False):
568 568 """return the "best" ancestor context of self and c2
569 569
570 570 If there are multiple candidates, it will show a message and check
571 571 merge.preferancestor configuration before falling back to the
572 572 revlog ancestor."""
573 573 # deal with workingctxs
574 574 n2 = c2._node
575 575 if n2 is None:
576 576 n2 = c2._parents[0]._node
577 577 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
578 578 if not cahs:
579 579 anc = nullid
580 580 elif len(cahs) == 1:
581 581 anc = cahs[0]
582 582 else:
583 583 # experimental config: merge.preferancestor
584 584 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
585 585 try:
586 586 ctx = changectx(self._repo, r)
587 587 except error.RepoLookupError:
588 588 continue
589 589 anc = ctx.node()
590 590 if anc in cahs:
591 591 break
592 592 else:
593 593 anc = self._repo.changelog.ancestor(self._node, n2)
594 594 if warn:
595 595 self._repo.ui.status(
596 596 (_("note: using %s as ancestor of %s and %s\n") %
597 597 (short(anc), short(self._node), short(n2))) +
598 598 ''.join(_(" alternatively, use --config "
599 599 "merge.preferancestor=%s\n") %
600 600 short(n) for n in sorted(cahs) if n != anc))
601 601 return changectx(self._repo, anc)
602 602
603 603 def descendant(self, other):
604 604 """True if other is descendant of this changeset"""
605 605 return self._repo.changelog.descendant(self._rev, other._rev)
606 606
607 607 def walk(self, match):
608 608 '''Generates matching file names.'''
609 609
610 610 # Wrap match.bad method to have message with nodeid
611 611 def bad(fn, msg):
612 612 # The manifest doesn't know about subrepos, so don't complain about
613 613 # paths into valid subrepos.
614 614 if any(fn == s or fn.startswith(s + '/')
615 615 for s in self.substate):
616 616 return
617 617 match.bad(fn, _('no such file in rev %s') % self)
618 618
619 619 m = matchmod.badmatch(match, bad)
620 620 return self._manifest.walk(m)
621 621
622 622 def matches(self, match):
623 623 return self.walk(match)
624 624
625 625 class basefilectx(object):
626 626 """A filecontext object represents the common logic for its children:
627 627 filectx: read-only access to a filerevision that is already present
628 628 in the repo,
629 629 workingfilectx: a filecontext that represents files from the working
630 630 directory,
631 631 memfilectx: a filecontext that represents files in-memory."""
632 632 def __new__(cls, repo, path, *args, **kwargs):
633 633 return super(basefilectx, cls).__new__(cls)
634 634
635 635 @propertycache
636 636 def _filelog(self):
637 637 return self._repo.file(self._path)
638 638
639 639 @propertycache
640 640 def _changeid(self):
641 641 if '_changeid' in self.__dict__:
642 642 return self._changeid
643 643 elif '_changectx' in self.__dict__:
644 644 return self._changectx.rev()
645 645 elif '_descendantrev' in self.__dict__:
646 646 # this file context was created from a revision with a known
647 647 # descendant, we can (lazily) correct for linkrev aliases
648 648 return self._adjustlinkrev(self._path, self._filelog,
649 649 self._filenode, self._descendantrev)
650 650 else:
651 651 return self._filelog.linkrev(self._filerev)
652 652
653 653 @propertycache
654 654 def _filenode(self):
655 655 if '_fileid' in self.__dict__:
656 656 return self._filelog.lookup(self._fileid)
657 657 else:
658 658 return self._changectx.filenode(self._path)
659 659
660 660 @propertycache
661 661 def _filerev(self):
662 662 return self._filelog.rev(self._filenode)
663 663
664 664 @propertycache
665 665 def _repopath(self):
666 666 return self._path
667 667
668 668 def __nonzero__(self):
669 669 try:
670 670 self._filenode
671 671 return True
672 672 except error.LookupError:
673 673 # file is missing
674 674 return False
675 675
676 676 def __str__(self):
677 677 return "%s@%s" % (self.path(), self._changectx)
678 678
679 679 def __repr__(self):
680 680 return "<%s %s>" % (type(self).__name__, str(self))
681 681
682 682 def __hash__(self):
683 683 try:
684 684 return hash((self._path, self._filenode))
685 685 except AttributeError:
686 686 return id(self)
687 687
688 688 def __eq__(self, other):
689 689 try:
690 690 return (type(self) == type(other) and self._path == other._path
691 691 and self._filenode == other._filenode)
692 692 except AttributeError:
693 693 return False
694 694
695 695 def __ne__(self, other):
696 696 return not (self == other)
697 697
698 698 def filerev(self):
699 699 return self._filerev
700 700 def filenode(self):
701 701 return self._filenode
702 702 def flags(self):
703 703 return self._changectx.flags(self._path)
704 704 def filelog(self):
705 705 return self._filelog
706 706 def rev(self):
707 707 return self._changeid
708 708 def linkrev(self):
709 709 return self._filelog.linkrev(self._filerev)
710 710 def node(self):
711 711 return self._changectx.node()
712 712 def hex(self):
713 713 return self._changectx.hex()
714 714 def user(self):
715 715 return self._changectx.user()
716 716 def date(self):
717 717 return self._changectx.date()
718 718 def files(self):
719 719 return self._changectx.files()
720 720 def description(self):
721 721 return self._changectx.description()
722 722 def branch(self):
723 723 return self._changectx.branch()
724 724 def extra(self):
725 725 return self._changectx.extra()
726 726 def phase(self):
727 727 return self._changectx.phase()
728 728 def phasestr(self):
729 729 return self._changectx.phasestr()
730 730 def manifest(self):
731 731 return self._changectx.manifest()
732 732 def changectx(self):
733 733 return self._changectx
734 734 def repo(self):
735 735 return self._repo
736 736
737 737 def path(self):
738 738 return self._path
739 739
740 740 def isbinary(self):
741 741 try:
742 742 return util.binary(self.data())
743 743 except IOError:
744 744 return False
745 745 def isexec(self):
746 746 return 'x' in self.flags()
747 747 def islink(self):
748 748 return 'l' in self.flags()
749 749
750 def isabsent(self):
751 """whether this filectx represents a file not in self._changectx
752
753 This is mainly for merge code to detect change/delete conflicts. This is
754 expected to be True only for subclasses that represent absent files."""
755 return False
756
750 757 _customcmp = False
751 758 def cmp(self, fctx):
752 759 """compare with other file context
753 760
754 761 returns True if different from fctx.
755 762 """
756 763 if fctx._customcmp:
757 764 return fctx.cmp(self)
758 765
759 766 if (fctx._filerev is None
760 767 and (self._repo._encodefilterpats
761 768 # if file data starts with '\1\n', empty metadata block is
762 769 # prepended, which adds 4 bytes to filelog.size().
763 770 or self.size() - 4 == fctx.size())
764 771 or self.size() == fctx.size()):
765 772 return self._filelog.cmp(self._filenode, fctx.data())
766 773
767 774 return True
768 775
769 776 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
770 777 """return the first ancestor of <srcrev> introducing <fnode>
771 778
772 779 If the linkrev of the file revision does not point to an ancestor of
773 780 srcrev, we'll walk down the ancestors until we find one introducing
774 781 this file revision.
775 782
776 783 :repo: a localrepository object (used to access changelog and manifest)
777 784 :path: the file path
778 785 :fnode: the nodeid of the file revision
779 786 :filelog: the filelog of this path
780 787 :srcrev: the changeset revision we search ancestors from
781 788 :inclusive: if true, the src revision will also be checked
782 789 """
783 790 repo = self._repo
784 791 cl = repo.unfiltered().changelog
785 792 ma = repo.manifest
786 793 # fetch the linkrev
787 794 fr = filelog.rev(fnode)
788 795 lkr = filelog.linkrev(fr)
789 796 # hack to reuse ancestor computation when searching for renames
790 797 memberanc = getattr(self, '_ancestrycontext', None)
791 798 iteranc = None
792 799 if srcrev is None:
793 800 # wctx case, used by workingfilectx during mergecopy
794 801 revs = [p.rev() for p in self._repo[None].parents()]
795 802 inclusive = True # we skipped the real (revless) source
796 803 else:
797 804 revs = [srcrev]
798 805 if memberanc is None:
799 806 memberanc = iteranc = cl.ancestors(revs, lkr,
800 807 inclusive=inclusive)
801 808 # check if this linkrev is an ancestor of srcrev
802 809 if lkr not in memberanc:
803 810 if iteranc is None:
804 811 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
805 812 for a in iteranc:
806 813 ac = cl.read(a) # get changeset data (we avoid object creation)
807 814 if path in ac[3]: # checking the 'files' field.
808 815 # The file has been touched, check if the content is
809 816 # similar to the one we search for.
810 817 if fnode == ma.readfast(ac[0]).get(path):
811 818 return a
812 819 # In theory, we should never get out of that loop without a result.
813 820 # But if the manifest uses a buggy file revision (not a child of the
814 821 # one it replaces) we could. Such a buggy situation will likely
815 822 # result in a crash somewhere else at some point.
816 823 return lkr
817 824
818 825 def introrev(self):
819 826 """return the rev of the changeset which introduced this file revision
820 827
821 828 This method is different from linkrev because it takes into account the
822 829 changeset the filectx was created from. It ensures the returned
823 830 revision is one of its ancestors. This prevents bugs from
824 831 'linkrev-shadowing' when a file revision is used by multiple
825 832 changesets.
826 833 """
827 834 lkr = self.linkrev()
828 835 attrs = vars(self)
829 836 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
830 837 if noctx or self.rev() == lkr:
831 838 return self.linkrev()
832 839 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
833 840 self.rev(), inclusive=True)
834 841
835 842 def _parentfilectx(self, path, fileid, filelog):
836 843 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
837 844 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
838 845 if '_changeid' in vars(self) or '_changectx' in vars(self):
839 846 # If self is associated with a changeset (probably explicitly
840 847 # fed), ensure the created filectx is associated with a
841 848 # changeset that is an ancestor of self.changectx.
842 849 # This lets us later use _adjustlinkrev to get a correct link.
843 850 fctx._descendantrev = self.rev()
844 851 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
845 852 elif '_descendantrev' in vars(self):
846 853 # Otherwise propagate _descendantrev if we have one associated.
847 854 fctx._descendantrev = self._descendantrev
848 855 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
849 856 return fctx
850 857
851 858 def parents(self):
852 859 _path = self._path
853 860 fl = self._filelog
854 861 parents = self._filelog.parents(self._filenode)
855 862 pl = [(_path, node, fl) for node in parents if node != nullid]
856 863
857 864 r = fl.renamed(self._filenode)
858 865 if r:
859 866 # - In the simple rename case, both parents are nullid, pl is empty.
860 867 # - In case of merge, only one of the parents is nullid and should
861 868 # be replaced with the rename information. This parent is -always-
862 869 # the first one.
863 870 #
864 871 # As nullid has always been filtered out in the previous list
865 872 # comprehension, inserting at 0 will always result in replacing the
866 873 # first nullid parent with the rename information.
867 874 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
868 875
869 876 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
870 877
871 878 def p1(self):
872 879 return self.parents()[0]
873 880
874 881 def p2(self):
875 882 p = self.parents()
876 883 if len(p) == 2:
877 884 return p[1]
878 885 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
879 886
880 887 def annotate(self, follow=False, linenumber=None, diffopts=None):
881 888 '''returns a list of tuples of (ctx, line) for each line
882 889 in the file, where ctx is the filectx of the node where
883 890 that line was last changed.
884 891 This returns tuples of ((ctx, linenumber), line) for each line,
885 892 if the "linenumber" parameter is NOT "None".
886 893 In such tuples, linenumber is the line's number at its first
887 894 appearance in the managed file.
888 895 To reduce annotation cost,
889 896 this returns a fixed value (False) as linenumber
890 897 if the "linenumber" parameter is "False".'''
891 898
892 899 if linenumber is None:
893 900 def decorate(text, rev):
894 901 return ([rev] * len(text.splitlines()), text)
895 902 elif linenumber:
896 903 def decorate(text, rev):
897 904 size = len(text.splitlines())
898 905 return ([(rev, i) for i in xrange(1, size + 1)], text)
899 906 else:
900 907 def decorate(text, rev):
901 908 return ([(rev, False)] * len(text.splitlines()), text)
902 909
903 910 def pair(parent, child):
904 911 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
905 912 refine=True)
906 913 for (a1, a2, b1, b2), t in blocks:
907 914 # Changed blocks ('!') or blocks made only of blank lines ('~')
908 915 # belong to the child.
909 916 if t == '=':
910 917 child[0][b1:b2] = parent[0][a1:a2]
911 918 return child
912 919
913 920 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
914 921
915 922 def parents(f):
916 923 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
917 924 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
918 925 # from the topmost introrev (= srcrev) down to p.linkrev() if it
919 926 # isn't an ancestor of the srcrev.
920 927 f._changeid
921 928 pl = f.parents()
922 929
923 930 # Don't return renamed parents if we aren't following.
924 931 if not follow:
925 932 pl = [p for p in pl if p.path() == f.path()]
926 933
927 934 # renamed filectx won't have a filelog yet, so set it
928 935 # from the cache to save time
929 936 for p in pl:
930 937 if not '_filelog' in p.__dict__:
931 938 p._filelog = getlog(p.path())
932 939
933 940 return pl
934 941
935 942 # use linkrev to find the first changeset where self appeared
936 943 base = self
937 944 introrev = self.introrev()
938 945 if self.rev() != introrev:
939 946 base = self.filectx(self.filenode(), changeid=introrev)
940 947 if getattr(base, '_ancestrycontext', None) is None:
941 948 cl = self._repo.changelog
942 949 if introrev is None:
943 950 # wctx is not inclusive, but works because _ancestrycontext
944 951 # is used to test filelog revisions
945 952 ac = cl.ancestors([p.rev() for p in base.parents()],
946 953 inclusive=True)
947 954 else:
948 955 ac = cl.ancestors([introrev], inclusive=True)
949 956 base._ancestrycontext = ac
950 957
951 958 # This algorithm would prefer to be recursive, but Python is a
952 959 # bit recursion-hostile. Instead we do an iterative
953 960 # depth-first search.
954 961
955 962 visit = [base]
956 963 hist = {}
957 964 pcache = {}
958 965 needed = {base: 1}
959 966 while visit:
960 967 f = visit[-1]
961 968 pcached = f in pcache
962 969 if not pcached:
963 970 pcache[f] = parents(f)
964 971
965 972 ready = True
966 973 pl = pcache[f]
967 974 for p in pl:
968 975 if p not in hist:
969 976 ready = False
970 977 visit.append(p)
971 978 if not pcached:
972 979 needed[p] = needed.get(p, 0) + 1
973 980 if ready:
974 981 visit.pop()
975 982 reusable = f in hist
976 983 if reusable:
977 984 curr = hist[f]
978 985 else:
979 986 curr = decorate(f.data(), f)
980 987 for p in pl:
981 988 if not reusable:
982 989 curr = pair(hist[p], curr)
983 990 if needed[p] == 1:
984 991 del hist[p]
985 992 del needed[p]
986 993 else:
987 994 needed[p] -= 1
988 995
989 996 hist[f] = curr
990 997 pcache[f] = []
991 998
992 999 return zip(hist[base][0], hist[base][1].splitlines(True))
993 1000
994 1001 def ancestors(self, followfirst=False):
995 1002 visit = {}
996 1003 c = self
997 1004 if followfirst:
998 1005 cut = 1
999 1006 else:
1000 1007 cut = None
1001 1008
1002 1009 while True:
1003 1010 for parent in c.parents()[:cut]:
1004 1011 visit[(parent.linkrev(), parent.filenode())] = parent
1005 1012 if not visit:
1006 1013 break
1007 1014 c = visit.pop(max(visit))
1008 1015 yield c
1009 1016
1010 1017 class filectx(basefilectx):
1011 1018 """A filecontext object makes access to data related to a particular
1012 1019 filerevision convenient."""
1013 1020 def __init__(self, repo, path, changeid=None, fileid=None,
1014 1021 filelog=None, changectx=None):
1015 1022 """changeid can be a changeset revision, node, or tag.
1016 1023 fileid can be a file revision or node."""
1017 1024 self._repo = repo
1018 1025 self._path = path
1019 1026
1020 1027 assert (changeid is not None
1021 1028 or fileid is not None
1022 1029 or changectx is not None), \
1023 1030 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1024 1031 % (changeid, fileid, changectx))
1025 1032
1026 1033 if filelog is not None:
1027 1034 self._filelog = filelog
1028 1035
1029 1036 if changeid is not None:
1030 1037 self._changeid = changeid
1031 1038 if changectx is not None:
1032 1039 self._changectx = changectx
1033 1040 if fileid is not None:
1034 1041 self._fileid = fileid
1035 1042
1036 1043 @propertycache
1037 1044 def _changectx(self):
1038 1045 try:
1039 1046 return changectx(self._repo, self._changeid)
1040 1047 except error.FilteredRepoLookupError:
1041 1048 # Linkrev may point to any revision in the repository. When the
1042 1049 # repository is filtered this may lead to `filectx` trying to build
1043 1050 # `changectx` for a filtered revision. In such a case we fall back to
1044 1051 # creating `changectx` on the unfiltered version of the repository.
1045 1052 # This fallback should not be an issue because `changectx` from
1046 1053 # `filectx` are not used in complex operations that care about
1047 1054 # filtering.
1048 1055 #
1049 1056 # This fallback is a cheap and dirty fix that prevents several
1050 1057 # crashes. It does not ensure the behavior is correct. However the
1051 1058 # behavior was not correct before filtering either and "incorrect
1052 1059 # behavior" is seen as better than "crash".
1053 1060 #
1054 1061 # Linkrevs have several serious troubles with filtering that are
1055 1062 # complicated to solve. Proper handling of the issue here should be
1056 1063 # considered when solving the linkrev issues is on the table.
1057 1064 return changectx(self._repo.unfiltered(), self._changeid)
1058 1065
1059 1066 def filectx(self, fileid, changeid=None):
1060 1067 '''opens an arbitrary revision of the file without
1061 1068 opening a new filelog'''
1062 1069 return filectx(self._repo, self._path, fileid=fileid,
1063 1070 filelog=self._filelog, changeid=changeid)
1064 1071
1065 1072 def data(self):
1066 1073 try:
1067 1074 return self._filelog.read(self._filenode)
1068 1075 except error.CensoredNodeError:
1069 1076 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1070 1077 return ""
1071 1078 raise error.Abort(_("censored node: %s") % short(self._filenode),
1072 1079 hint=_("set censor.policy to ignore errors"))
1073 1080
1074 1081 def size(self):
1075 1082 return self._filelog.size(self._filerev)
1076 1083
1077 1084 def renamed(self):
1078 1085 """check if file was actually renamed in this changeset revision
1079 1086
1080 1087 If the rename is logged in the file revision, we report the copy for the
1081 1088 changeset only if the file revision's linkrev points back to the changeset
1082 1089 in question or both changeset parents contain different file revisions.
1083 1090 """
1084 1091
1085 1092 renamed = self._filelog.renamed(self._filenode)
1086 1093 if not renamed:
1087 1094 return renamed
1088 1095
1089 1096 if self.rev() == self.linkrev():
1090 1097 return renamed
1091 1098
1092 1099 name = self.path()
1093 1100 fnode = self._filenode
1094 1101 for p in self._changectx.parents():
1095 1102 try:
1096 1103 if fnode == p.filenode(name):
1097 1104 return None
1098 1105 except error.LookupError:
1099 1106 pass
1100 1107 return renamed
1101 1108
1102 1109 def children(self):
1103 1110 # hard for renames
1104 1111 c = self._filelog.children(self._filenode)
1105 1112 return [filectx(self._repo, self._path, fileid=x,
1106 1113 filelog=self._filelog) for x in c]
1107 1114
1108 1115 class committablectx(basectx):
1109 1116 """A committablectx object provides common functionality for a context that
1110 1117 wants the ability to commit, e.g. workingctx or memctx."""
1111 1118 def __init__(self, repo, text="", user=None, date=None, extra=None,
1112 1119 changes=None):
1113 1120 self._repo = repo
1114 1121 self._rev = None
1115 1122 self._node = None
1116 1123 self._text = text
1117 1124 if date:
1118 1125 self._date = util.parsedate(date)
1119 1126 if user:
1120 1127 self._user = user
1121 1128 if changes:
1122 1129 self._status = changes
1123 1130
1124 1131 self._extra = {}
1125 1132 if extra:
1126 1133 self._extra = extra.copy()
1127 1134 if 'branch' not in self._extra:
1128 1135 try:
1129 1136 branch = encoding.fromlocal(self._repo.dirstate.branch())
1130 1137 except UnicodeDecodeError:
1131 1138 raise error.Abort(_('branch name not in UTF-8!'))
1132 1139 self._extra['branch'] = branch
1133 1140 if self._extra['branch'] == '':
1134 1141 self._extra['branch'] = 'default'
1135 1142
1136 1143 def __str__(self):
1137 1144 return str(self._parents[0]) + "+"
1138 1145
1139 1146 def __nonzero__(self):
1140 1147 return True
1141 1148
1142 1149 def _buildflagfunc(self):
1143 1150 # Create a fallback function for getting file flags when the
1144 1151 # filesystem doesn't support them
1145 1152
1146 1153 copiesget = self._repo.dirstate.copies().get
1147 1154
1148 1155 if len(self._parents) < 2:
1149 1156 # when we have one parent, it's easy: copy from parent
1150 1157 man = self._parents[0].manifest()
1151 1158 def func(f):
1152 1159 f = copiesget(f, f)
1153 1160 return man.flags(f)
1154 1161 else:
1155 1162 # merges are tricky: we try to reconstruct the unstored
1156 1163 # result from the merge (issue1802)
1157 1164 p1, p2 = self._parents
1158 1165 pa = p1.ancestor(p2)
1159 1166 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1160 1167
1161 1168 def func(f):
1162 1169 f = copiesget(f, f) # may be wrong for merges with copies
1163 1170 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1164 1171 if fl1 == fl2:
1165 1172 return fl1
1166 1173 if fl1 == fla:
1167 1174 return fl2
1168 1175 if fl2 == fla:
1169 1176 return fl1
1170 1177 return '' # punt for conflicts
1171 1178
1172 1179 return func
1173 1180
1174 1181 @propertycache
1175 1182 def _flagfunc(self):
1176 1183 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1177 1184
1178 1185 @propertycache
1179 1186 def _manifest(self):
1180 1187 """generate a manifest corresponding to the values in self._status
1181 1188
1182 1189 This reuses the file nodeid from the parent, but we append an extra letter
1183 1190 when modified. Modified files get an extra 'm' while added files get
1184 1191 an extra 'a'. This is used by the manifest merge to see that files
1185 1192 are different and by update logic to avoid deleting newly added files.
1186 1193 """
1187 1194
1188 1195 man1 = self._parents[0].manifest()
1189 1196 man = man1.copy()
1190 1197 if len(self._parents) > 1:
1191 1198 man2 = self.p2().manifest()
1192 1199 def getman(f):
1193 1200 if f in man1:
1194 1201 return man1
1195 1202 return man2
1196 1203 else:
1197 1204 getman = lambda f: man1
1198 1205
1199 1206 copied = self._repo.dirstate.copies()
1200 1207 ff = self._flagfunc
1201 1208 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1202 1209 for f in l:
1203 1210 orig = copied.get(f, f)
1204 1211 man[f] = getman(orig).get(orig, nullid) + i
1205 1212 try:
1206 1213 man.setflag(f, ff(f))
1207 1214 except OSError:
1208 1215 pass
1209 1216
1210 1217 for f in self._status.deleted + self._status.removed:
1211 1218 if f in man:
1212 1219 del man[f]
1213 1220
1214 1221 return man
1215 1222
1216 1223 @propertycache
1217 1224 def _status(self):
1218 1225 return self._repo.status()
1219 1226
1220 1227 @propertycache
1221 1228 def _user(self):
1222 1229 return self._repo.ui.username()
1223 1230
1224 1231 @propertycache
1225 1232 def _date(self):
1226 1233 return util.makedate()
1227 1234
1228 1235 def subrev(self, subpath):
1229 1236 return None
1230 1237
1231 1238 def manifestnode(self):
1232 1239 return None
1233 1240 def user(self):
1234 1241 return self._user or self._repo.ui.username()
1235 1242 def date(self):
1236 1243 return self._date
1237 1244 def description(self):
1238 1245 return self._text
1239 1246 def files(self):
1240 1247 return sorted(self._status.modified + self._status.added +
1241 1248 self._status.removed)
1242 1249
1243 1250 def modified(self):
1244 1251 return self._status.modified
1245 1252 def added(self):
1246 1253 return self._status.added
1247 1254 def removed(self):
1248 1255 return self._status.removed
1249 1256 def deleted(self):
1250 1257 return self._status.deleted
1251 1258 def branch(self):
1252 1259 return encoding.tolocal(self._extra['branch'])
1253 1260 def closesbranch(self):
1254 1261 return 'close' in self._extra
1255 1262 def extra(self):
1256 1263 return self._extra
1257 1264
1258 1265 def tags(self):
1259 1266 return []
1260 1267
1261 1268 def bookmarks(self):
1262 1269 b = []
1263 1270 for p in self.parents():
1264 1271 b.extend(p.bookmarks())
1265 1272 return b
1266 1273
1267 1274 def phase(self):
1268 1275 phase = phases.draft # default phase to draft
1269 1276 for p in self.parents():
1270 1277 phase = max(phase, p.phase())
1271 1278 return phase
1272 1279
1273 1280 def hidden(self):
1274 1281 return False
1275 1282
1276 1283 def children(self):
1277 1284 return []
1278 1285
1279 1286 def flags(self, path):
1280 1287 if '_manifest' in self.__dict__:
1281 1288 try:
1282 1289 return self._manifest.flags(path)
1283 1290 except KeyError:
1284 1291 return ''
1285 1292
1286 1293 try:
1287 1294 return self._flagfunc(path)
1288 1295 except OSError:
1289 1296 return ''
1290 1297
1291 1298 def ancestor(self, c2):
1292 1299 """return the "best" ancestor context of self and c2"""
1293 1300 return self._parents[0].ancestor(c2) # punt on two parents for now
1294 1301
1295 1302 def walk(self, match):
1296 1303 '''Generates matching file names.'''
1297 1304 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1298 1305 True, False))
1299 1306
1300 1307 def matches(self, match):
1301 1308 return sorted(self._repo.dirstate.matches(match))
1302 1309
1303 1310 def ancestors(self):
1304 1311 for p in self._parents:
1305 1312 yield p
1306 1313 for a in self._repo.changelog.ancestors(
1307 1314 [p.rev() for p in self._parents]):
1308 1315 yield changectx(self._repo, a)
1309 1316
1310 1317 def markcommitted(self, node):
1311 1318 """Perform post-commit cleanup necessary after committing this ctx
1312 1319
1313 1320 Specifically, this updates backing stores this working context
1314 1321 wraps to reflect the fact that the changes reflected by this
1315 1322 workingctx have been committed. For example, it marks
1316 1323 modified and added files as normal in the dirstate.
1317 1324
1318 1325 """
1319 1326
1320 1327 self._repo.dirstate.beginparentchange()
1321 1328 for f in self.modified() + self.added():
1322 1329 self._repo.dirstate.normal(f)
1323 1330 for f in self.removed():
1324 1331 self._repo.dirstate.drop(f)
1325 1332 self._repo.dirstate.setparents(node)
1326 1333 self._repo.dirstate.endparentchange()
1327 1334
1328 1335 # write changes out explicitly, because nesting wlock at
1329 1336 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1330 1337 # from immediately doing so for subsequent changing files
1331 1338 self._repo.dirstate.write(self._repo.currenttransaction())
1332 1339
1333 1340 class workingctx(committablectx):
1334 1341 """A workingctx object makes access to data related to
1335 1342 the current working directory convenient.
1336 1343 date - any valid date string or (unixtime, offset), or None.
1337 1344 user - username string, or None.
1338 1345 extra - a dictionary of extra values, or None.
1339 1346 changes - a list of file lists as returned by localrepo.status()
1340 1347 or None to use the repository status.
1341 1348 """
1342 1349 def __init__(self, repo, text="", user=None, date=None, extra=None,
1343 1350 changes=None):
1344 1351 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1345 1352
1346 1353 def __iter__(self):
1347 1354 d = self._repo.dirstate
1348 1355 for f in d:
1349 1356 if d[f] != 'r':
1350 1357 yield f
1351 1358
1352 1359 def __contains__(self, key):
1353 1360 return self._repo.dirstate[key] not in "?r"
1354 1361
1355 1362 def hex(self):
1356 1363 return hex(wdirid)
1357 1364
1358 1365 @propertycache
1359 1366 def _parents(self):
1360 1367 p = self._repo.dirstate.parents()
1361 1368 if p[1] == nullid:
1362 1369 p = p[:-1]
1363 1370 return [changectx(self._repo, x) for x in p]
1364 1371
1365 1372 def filectx(self, path, filelog=None):
1366 1373 """get a file context from the working directory"""
1367 1374 return workingfilectx(self._repo, path, workingctx=self,
1368 1375 filelog=filelog)
1369 1376
1370 1377 def dirty(self, missing=False, merge=True, branch=True):
1371 1378 "check whether a working directory is modified"
1372 1379 # check subrepos first
1373 1380 for s in sorted(self.substate):
1374 1381 if self.sub(s).dirty():
1375 1382 return True
1376 1383 # check current working dir
1377 1384 return ((merge and self.p2()) or
1378 1385 (branch and self.branch() != self.p1().branch()) or
1379 1386 self.modified() or self.added() or self.removed() or
1380 1387 (missing and self.deleted()))
1381 1388
1382 1389 def add(self, list, prefix=""):
1383 1390 join = lambda f: os.path.join(prefix, f)
1384 1391 wlock = self._repo.wlock()
1385 1392 ui, ds = self._repo.ui, self._repo.dirstate
1386 1393 try:
1387 1394 rejected = []
1388 1395 lstat = self._repo.wvfs.lstat
1389 1396 for f in list:
1390 1397 scmutil.checkportable(ui, join(f))
1391 1398 try:
1392 1399 st = lstat(f)
1393 1400 except OSError:
1394 1401 ui.warn(_("%s does not exist!\n") % join(f))
1395 1402 rejected.append(f)
1396 1403 continue
1397 1404 if st.st_size > 10000000:
1398 1405 ui.warn(_("%s: up to %d MB of RAM may be required "
1399 1406 "to manage this file\n"
1400 1407 "(use 'hg revert %s' to cancel the "
1401 1408 "pending addition)\n")
1402 1409 % (f, 3 * st.st_size // 1000000, join(f)))
1403 1410 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1404 1411 ui.warn(_("%s not added: only files and symlinks "
1405 1412 "supported currently\n") % join(f))
1406 1413 rejected.append(f)
1407 1414 elif ds[f] in 'amn':
1408 1415 ui.warn(_("%s already tracked!\n") % join(f))
1409 1416 elif ds[f] == 'r':
1410 1417 ds.normallookup(f)
1411 1418 else:
1412 1419 ds.add(f)
1413 1420 return rejected
1414 1421 finally:
1415 1422 wlock.release()
1416 1423
1417 1424 def forget(self, files, prefix=""):
1418 1425 join = lambda f: os.path.join(prefix, f)
1419 1426 wlock = self._repo.wlock()
1420 1427 try:
1421 1428 rejected = []
1422 1429 for f in files:
1423 1430 if f not in self._repo.dirstate:
1424 1431 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1425 1432 rejected.append(f)
1426 1433 elif self._repo.dirstate[f] != 'a':
1427 1434 self._repo.dirstate.remove(f)
1428 1435 else:
1429 1436 self._repo.dirstate.drop(f)
1430 1437 return rejected
1431 1438 finally:
1432 1439 wlock.release()
1433 1440
1434 1441 def undelete(self, list):
1435 1442 pctxs = self.parents()
1436 1443 wlock = self._repo.wlock()
1437 1444 try:
1438 1445 for f in list:
1439 1446 if self._repo.dirstate[f] != 'r':
1440 1447 self._repo.ui.warn(_("%s not removed!\n") % f)
1441 1448 else:
1442 1449 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1443 1450 t = fctx.data()
1444 1451 self._repo.wwrite(f, t, fctx.flags())
1445 1452 self._repo.dirstate.normal(f)
1446 1453 finally:
1447 1454 wlock.release()
1448 1455
1449 1456 def copy(self, source, dest):
1450 1457 try:
1451 1458 st = self._repo.wvfs.lstat(dest)
1452 1459 except OSError as err:
1453 1460 if err.errno != errno.ENOENT:
1454 1461 raise
1455 1462 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1456 1463 return
1457 1464 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1458 1465 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1459 1466 "symbolic link\n") % dest)
1460 1467 else:
1461 1468 wlock = self._repo.wlock()
1462 1469 try:
1463 1470 if self._repo.dirstate[dest] in '?':
1464 1471 self._repo.dirstate.add(dest)
1465 1472 elif self._repo.dirstate[dest] in 'r':
1466 1473 self._repo.dirstate.normallookup(dest)
1467 1474 self._repo.dirstate.copy(source, dest)
1468 1475 finally:
1469 1476 wlock.release()
1470 1477
1471 1478 def match(self, pats=[], include=None, exclude=None, default='glob',
1472 1479 listsubrepos=False, badfn=None):
1473 1480 r = self._repo
1474 1481
1475 1482 # Only a case insensitive filesystem needs magic to translate user input
1476 1483 # to actual case in the filesystem.
1477 1484 if not util.checkcase(r.root):
1478 1485 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1479 1486 exclude, default, r.auditor, self,
1480 1487 listsubrepos=listsubrepos,
1481 1488 badfn=badfn)
1482 1489 return matchmod.match(r.root, r.getcwd(), pats,
1483 1490 include, exclude, default,
1484 1491 auditor=r.auditor, ctx=self,
1485 1492 listsubrepos=listsubrepos, badfn=badfn)
1486 1493
1487 1494 def _filtersuspectsymlink(self, files):
1488 1495 if not files or self._repo.dirstate._checklink:
1489 1496 return files
1490 1497
1491 1498 # Symlink placeholders may get non-symlink-like contents
1492 1499 # via user error or dereferencing by NFS or Samba servers,
1493 1500 # so we filter out any placeholders that don't look like a
1494 1501 # symlink
1495 1502 sane = []
1496 1503 for f in files:
1497 1504 if self.flags(f) == 'l':
1498 1505 d = self[f].data()
1499 1506 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1500 1507 self._repo.ui.debug('ignoring suspect symlink placeholder'
1501 1508 ' "%s"\n' % f)
1502 1509 continue
1503 1510 sane.append(f)
1504 1511 return sane
1505 1512
1506 1513 def _checklookup(self, files):
1507 1514 # check for any possibly clean files
1508 1515 if not files:
1509 1516 return [], []
1510 1517
1511 1518 modified = []
1512 1519 fixup = []
1513 1520 pctx = self._parents[0]
1514 1521 # do a full compare of any files that might have changed
1515 1522 for f in sorted(files):
1516 1523 if (f not in pctx or self.flags(f) != pctx.flags(f)
1517 1524 or pctx[f].cmp(self[f])):
1518 1525 modified.append(f)
1519 1526 else:
1520 1527 fixup.append(f)
1521 1528
1522 1529 # update dirstate for files that are actually clean
1523 1530 if fixup:
1524 1531 try:
1525 1532 # updating the dirstate is optional
1526 1533 # so we don't wait on the lock
1527 1534 # wlock can invalidate the dirstate, so cache normal _after_
1528 1535 # taking the lock
1529 1536 wlock = self._repo.wlock(False)
1530 1537 normal = self._repo.dirstate.normal
1531 1538 try:
1532 1539 for f in fixup:
1533 1540 normal(f)
1534 1541 # write changes out explicitly, because nesting
1535 1542 # wlock at runtime may prevent 'wlock.release()'
1536 1543 # below from doing so for subsequent changing files
1537 1544 self._repo.dirstate.write(self._repo.currenttransaction())
1538 1545 finally:
1539 1546 wlock.release()
1540 1547 except error.LockError:
1541 1548 pass
1542 1549 return modified, fixup
1543 1550
1544 1551 def _manifestmatches(self, match, s):
1545 1552 """Slow path for workingctx
1546 1553
1547 1554 The fast path is when we compare the working directory to its parent
1548 1555 which means this function is comparing with a non-parent; therefore we
1549 1556 need to build a manifest and return what matches.
1550 1557 """
1551 1558 mf = self._repo['.']._manifestmatches(match, s)
1552 1559 for f in s.modified + s.added:
1553 1560 mf[f] = _newnode
1554 1561 mf.setflag(f, self.flags(f))
1555 1562 for f in s.removed:
1556 1563 if f in mf:
1557 1564 del mf[f]
1558 1565 return mf
1559 1566
1560 1567 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1561 1568 unknown=False):
1562 1569 '''Gets the status from the dirstate -- internal use only.'''
1563 1570 listignored, listclean, listunknown = ignored, clean, unknown
1564 1571 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1565 1572 subrepos = []
1566 1573 if '.hgsub' in self:
1567 1574 subrepos = sorted(self.substate)
1568 1575 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1569 1576 listclean, listunknown)
1570 1577
1571 1578 # check for any possibly clean files
1572 1579 if cmp:
1573 1580 modified2, fixup = self._checklookup(cmp)
1574 1581 s.modified.extend(modified2)
1575 1582
1576 1583 # update dirstate for files that are actually clean
1577 1584 if fixup and listclean:
1578 1585 s.clean.extend(fixup)
1579 1586
1580 1587 if match.always():
1581 1588 # cache for performance
1582 1589 if s.unknown or s.ignored or s.clean:
1583 1590 # "_status" is cached with list*=False in the normal route
1584 1591 self._status = scmutil.status(s.modified, s.added, s.removed,
1585 1592 s.deleted, [], [], [])
1586 1593 else:
1587 1594 self._status = s
1588 1595
1589 1596 return s
1590 1597
1591 1598 def _buildstatus(self, other, s, match, listignored, listclean,
1592 1599 listunknown):
1593 1600 """build a status with respect to another context
1594 1601
1595 1602         This includes logic for maintaining the fast path of status when
1596 1603         comparing the working directory against its parent: a new manifest
1597 1604         is only built when self (the working directory) is being compared
1598 1605         against something other than its parent (repo['.']).
1599 1606 """
1600 1607 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1601 1608 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1602 1609 # might have accidentally ended up with the entire contents of the file
1603 1610 # they are supposed to be linking to.
1604 1611 s.modified[:] = self._filtersuspectsymlink(s.modified)
1605 1612 if other != self._repo['.']:
1606 1613 s = super(workingctx, self)._buildstatus(other, s, match,
1607 1614 listignored, listclean,
1608 1615 listunknown)
1609 1616 return s
1610 1617
1611 1618 def _matchstatus(self, other, match):
1612 1619 """override the match method with a filter for directory patterns
1613 1620
1614 1621 We use inheritance to customize the match.bad method only in cases of
1615 1622 workingctx since it belongs only to the working directory when
1616 1623 comparing against the parent changeset.
1617 1624
1618 1625 If we aren't comparing against the working directory's parent, then we
1619 1626 just use the default match object sent to us.
1620 1627 """
1621 1628 superself = super(workingctx, self)
1622 1629 match = superself._matchstatus(other, match)
1623 1630 if other != self._repo['.']:
1624 1631 def bad(f, msg):
1625 1632 # 'f' may be a directory pattern from 'match.files()',
1626 1633 # so 'f not in ctx1' is not enough
1627 1634 if f not in other and not other.hasdir(f):
1628 1635 self._repo.ui.warn('%s: %s\n' %
1629 1636 (self._repo.dirstate.pathto(f), msg))
1630 1637 match.bad = bad
1631 1638 return match
1632 1639
1633 1640 class committablefilectx(basefilectx):
1634 1641 """A committablefilectx provides common functionality for a file context
1635 1642 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1636 1643 def __init__(self, repo, path, filelog=None, ctx=None):
1637 1644 self._repo = repo
1638 1645 self._path = path
1639 1646 self._changeid = None
1640 1647 self._filerev = self._filenode = None
1641 1648
1642 1649 if filelog is not None:
1643 1650 self._filelog = filelog
1644 1651 if ctx:
1645 1652 self._changectx = ctx
1646 1653
1647 1654 def __nonzero__(self):
1648 1655 return True
1649 1656
1650 1657 def linkrev(self):
1651 1658 # linked to self._changectx no matter if file is modified or not
1652 1659 return self.rev()
1653 1660
1654 1661 def parents(self):
1655 1662 '''return parent filectxs, following copies if necessary'''
1656 1663 def filenode(ctx, path):
1657 1664 return ctx._manifest.get(path, nullid)
1658 1665
1659 1666 path = self._path
1660 1667 fl = self._filelog
1661 1668 pcl = self._changectx._parents
1662 1669 renamed = self.renamed()
1663 1670
1664 1671 if renamed:
1665 1672 pl = [renamed + (None,)]
1666 1673 else:
1667 1674 pl = [(path, filenode(pcl[0], path), fl)]
1668 1675
1669 1676 for pc in pcl[1:]:
1670 1677 pl.append((path, filenode(pc, path), fl))
1671 1678
1672 1679 return [self._parentfilectx(p, fileid=n, filelog=l)
1673 1680 for p, n, l in pl if n != nullid]
1674 1681
1675 1682 def children(self):
1676 1683 return []
1677 1684
1678 1685 class workingfilectx(committablefilectx):
1679 1686 """A workingfilectx object makes access to data related to a particular
1680 1687 file in the working directory convenient."""
1681 1688 def __init__(self, repo, path, filelog=None, workingctx=None):
1682 1689 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1683 1690
1684 1691 @propertycache
1685 1692 def _changectx(self):
1686 1693 return workingctx(self._repo)
1687 1694
1688 1695 def data(self):
1689 1696 return self._repo.wread(self._path)
1690 1697 def renamed(self):
1691 1698 rp = self._repo.dirstate.copied(self._path)
1692 1699 if not rp:
1693 1700 return None
1694 1701 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1695 1702
1696 1703 def size(self):
1697 1704 return self._repo.wvfs.lstat(self._path).st_size
1698 1705 def date(self):
1699 1706 t, tz = self._changectx.date()
1700 1707 try:
1701 1708 return (util.statmtimesec(self._repo.wvfs.lstat(self._path)), tz)
1702 1709 except OSError as err:
1703 1710 if err.errno != errno.ENOENT:
1704 1711 raise
1705 1712 return (t, tz)
1706 1713
1707 1714 def cmp(self, fctx):
1708 1715 """compare with other file context
1709 1716
1710 1717         returns True if different from fctx.
1711 1718 """
1712 1719 # fctx should be a filectx (not a workingfilectx)
1713 1720 # invert comparison to reuse the same code path
1714 1721 return fctx.cmp(self)
1715 1722
1716 1723 def remove(self, ignoremissing=False):
1717 1724 """wraps unlink for a repo's working directory"""
1718 1725 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1719 1726
1720 1727 def write(self, data, flags):
1721 1728 """wraps repo.wwrite"""
1722 1729 self._repo.wwrite(self._path, data, flags)
1723 1730
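A hedged usage sketch for the class above (repo is assumed to be an existing localrepo with a tracked 'README'; repo[None] is the usual way to obtain a workingctx):

wctx = repo[None]                       # workingctx for the working directory
fctx = wctx['README']                   # workingfilectx via basectx.__getitem__
data = fctx.data()                      # contents read through repo.wread()
size = fctx.size()                      # lstat-based, avoids reading the file
dirty = fctx.cmp(repo['.']['README'])   # True if it differs from the parent's copy
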
1724 1731 class workingcommitctx(workingctx):
1725 1732 """A workingcommitctx object makes access to data related to
1726 1733 the revision being committed convenient.
1727 1734
1728 1735     This hides changes in the working directory if they aren't
1729 1736     committed in this context.
1730 1737 """
1731 1738 def __init__(self, repo, changes,
1732 1739 text="", user=None, date=None, extra=None):
1733 1740 super(workingctx, self).__init__(repo, text, user, date, extra,
1734 1741 changes)
1735 1742
1736 1743 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1737 1744 unknown=False):
1738 1745 """Return matched files only in ``self._status``
1739 1746
1740 1747 Uncommitted files appear "clean" via this context, even if
1741 1748 they aren't actually so in the working directory.
1742 1749 """
1743 1750 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1744 1751 if clean:
1745 1752 clean = [f for f in self._manifest if f not in self._changedset]
1746 1753 else:
1747 1754 clean = []
1748 1755 return scmutil.status([f for f in self._status.modified if match(f)],
1749 1756 [f for f in self._status.added if match(f)],
1750 1757 [f for f in self._status.removed if match(f)],
1751 1758 [], [], [], clean)
1752 1759
1753 1760 @propertycache
1754 1761 def _changedset(self):
1755 1762 """Return the set of files changed in this context
1756 1763 """
1757 1764 changed = set(self._status.modified)
1758 1765 changed.update(self._status.added)
1759 1766 changed.update(self._status.removed)
1760 1767 return changed
1761 1768
1762 1769 class memctx(committablectx):
1763 1770 """Use memctx to perform in-memory commits via localrepo.commitctx().
1764 1771
1765 1772     Revision information is supplied at initialization time, while
1766 1773     related file data is made available through a callback
1767 1774     mechanism. 'repo' is the current localrepo, 'parents' is a
1768 1775 sequence of two parent revisions identifiers (pass None for every
1769 1776 missing parent), 'text' is the commit message and 'files' lists
1770 1777 names of files touched by the revision (normalized and relative to
1771 1778 repository root).
1772 1779
1773 1780 filectxfn(repo, memctx, path) is a callable receiving the
1774 1781 repository, the current memctx object and the normalized path of
1775 1782 requested file, relative to repository root. It is fired by the
1776 1783     commit function for every file in 'files', but the call order is
1777 1784     undefined. If the file is available in the revision being
1778 1785 committed (updated or added), filectxfn returns a memfilectx
1779 1786 object. If the file was removed, filectxfn raises an
1780 1787 IOError. Moved files are represented by marking the source file
1781 1788 removed and the new file added with copy information (see
1782 1789 memfilectx).
1783 1790
1784 1791     'user' is the committer name and defaults to the current
1785 1792     repository username, 'date' is the commit date in any format
1786 1793     supported by util.parsedate() and defaults to the current date, and
1787 1794     'extra' is a dictionary of metadata or is left empty.
1788 1795 """
1789 1796
1790 1797 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1791 1798 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1792 1799 # this field to determine what to do in filectxfn.
1793 1800 _returnnoneformissingfiles = True
1794 1801
1795 1802 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1796 1803 date=None, extra=None, editor=False):
1797 1804 super(memctx, self).__init__(repo, text, user, date, extra)
1798 1805 self._rev = None
1799 1806 self._node = None
1800 1807 parents = [(p or nullid) for p in parents]
1801 1808 p1, p2 = parents
1802 1809 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1803 1810 files = sorted(set(files))
1804 1811 self._files = files
1805 1812 self.substate = {}
1806 1813
1807 1814 # if store is not callable, wrap it in a function
1808 1815 if not callable(filectxfn):
1809 1816 def getfilectx(repo, memctx, path):
1810 1817 fctx = filectxfn[path]
1811 1818 # this is weird but apparently we only keep track of one parent
1812 1819 # (why not only store that instead of a tuple?)
1813 1820 copied = fctx.renamed()
1814 1821 if copied:
1815 1822 copied = copied[0]
1816 1823 return memfilectx(repo, path, fctx.data(),
1817 1824 islink=fctx.islink(), isexec=fctx.isexec(),
1818 1825 copied=copied, memctx=memctx)
1819 1826 self._filectxfn = getfilectx
1820 1827 else:
1821 1828 # "util.cachefunc" reduces invocation of possibly expensive
1822 1829 # "filectxfn" for performance (e.g. converting from another VCS)
1823 1830 self._filectxfn = util.cachefunc(filectxfn)
1824 1831
1825 1832 if extra:
1826 1833 self._extra = extra.copy()
1827 1834 else:
1828 1835 self._extra = {}
1829 1836
1830 1837 if self._extra.get('branch', '') == '':
1831 1838 self._extra['branch'] = 'default'
1832 1839
1833 1840 if editor:
1834 1841 self._text = editor(self._repo, self, [])
1835 1842 self._repo.savecommitmessage(self._text)
1836 1843
1837 1844 def filectx(self, path, filelog=None):
1838 1845         """get a file context for the given path in this in-memory revision
1839 1846
1840 1847 Returns None if file doesn't exist and should be removed."""
1841 1848 return self._filectxfn(self._repo, self, path)
1842 1849
1843 1850 def commit(self):
1844 1851 """commit context to the repo"""
1845 1852 return self._repo.commitctx(self)
1846 1853
1847 1854 @propertycache
1848 1855 def _manifest(self):
1849 1856 """generate a manifest based on the return values of filectxfn"""
1850 1857
1851 1858 # keep this simple for now; just worry about p1
1852 1859 pctx = self._parents[0]
1853 1860 man = pctx.manifest().copy()
1854 1861
1855 1862 for f in self._status.modified:
1856 1863 p1node = nullid
1857 1864 p2node = nullid
1858 1865 p = pctx[f].parents() # if file isn't in pctx, check p2?
1859 1866 if len(p) > 0:
1860 1867 p1node = p[0].node()
1861 1868 if len(p) > 1:
1862 1869 p2node = p[1].node()
1863 1870 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1864 1871
1865 1872 for f in self._status.added:
1866 1873 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1867 1874
1868 1875 for f in self._status.removed:
1869 1876 if f in man:
1870 1877 del man[f]
1871 1878
1872 1879 return man
1873 1880
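revlog.hash derives a new file nodeid from the content plus both parent nodeids. Conceptually it is the standard Mercurial node computation sketched below (a self-contained approximation, not a call into revlog):

import hashlib

NULLID = '\0' * 20   # local stand-in for node.nullid

def filenode(text, p1=NULLID, p2=NULLID):
    # The two parent nodes are hashed in sorted order, then the text, so
    # identical content reached through different history still yields a
    # distinct nodeid.
    a, b = sorted([p1, p2])
    return hashlib.sha1(a + b + text).digest()
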
1874 1881 @propertycache
1875 1882 def _status(self):
1876 1883 """Calculate exact status from ``files`` specified at construction
1877 1884 """
1878 1885 man1 = self.p1().manifest()
1879 1886 p2 = self._parents[1]
1880 1887         # "1 < len(self._parents)" can't be used for checking the
1881 1888         # existence of the 2nd parent, because "memctx._parents" is
1882 1889         # explicitly initialized with a list whose length is always 2.
1883 1890 if p2.node() != nullid:
1884 1891 man2 = p2.manifest()
1885 1892 managing = lambda f: f in man1 or f in man2
1886 1893 else:
1887 1894 managing = lambda f: f in man1
1888 1895
1889 1896 modified, added, removed = [], [], []
1890 1897 for f in self._files:
1891 1898 if not managing(f):
1892 1899 added.append(f)
1893 1900 elif self[f]:
1894 1901 modified.append(f)
1895 1902 else:
1896 1903 removed.append(f)
1897 1904
1898 1905 return scmutil.status(modified, added, removed, [], [], [], [])
1899 1906
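The truthiness test "elif self[f]" above is what separates modified from removed: filectxfn returning None (the post-3.1 convention noted earlier) marks a removal. A toy restatement of the three-way split (hypothetical helper, not part of memctx):

def classify(tracked_in_a_parent, fctx_or_none):
    # tracked_in_a_parent mirrors the managing() lambda above;
    # fctx_or_none mirrors what filectxfn returned for the file.
    if not tracked_in_a_parent:
        return 'added'
    elif fctx_or_none:
        return 'modified'
    else:
        return 'removed'

assert classify(False, object()) == 'added'
assert classify(True, object()) == 'modified'
assert classify(True, None) == 'removed'
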
1900 1907 class memfilectx(committablefilectx):
1901 1908 """memfilectx represents an in-memory file to commit.
1902 1909
1903 1910 See memctx and committablefilectx for more details.
1904 1911 """
1905 1912 def __init__(self, repo, path, data, islink=False,
1906 1913 isexec=False, copied=None, memctx=None):
1907 1914 """
1908 1915 path is the normalized file path relative to repository root.
1909 1916 data is the file content as a string.
1910 1917 islink is True if the file is a symbolic link.
1911 1918 isexec is True if the file is executable.
1912 1919 copied is the source file path if current file was copied in the
1913 1920 revision being committed, or None."""
1914 1921 super(memfilectx, self).__init__(repo, path, None, memctx)
1915 1922 self._data = data
1916 1923 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1917 1924 self._copied = None
1918 1925 if copied:
1919 1926 self._copied = (copied, nullid)
1920 1927
1921 1928 def data(self):
1922 1929 return self._data
1923 1930 def size(self):
1924 1931 return len(self.data())
1925 1932 def flags(self):
1926 1933 return self._flags
1927 1934 def renamed(self):
1928 1935 return self._copied
1929 1936
1930 1937 def remove(self, ignoremissing=False):
1931 1938 """wraps unlink for a repo's working directory"""
1932 1939 # need to figure out what to do here
1933 1940 del self._changectx[self._path]
1934 1941
1935 1942 def write(self, data, flags):
1936 1943 """wraps repo.wwrite"""
1937 1944 self._data = data
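Putting memctx and memfilectx together, an in-memory commit could be assembled roughly as follows (a sketch under assumed names: repo is an existing localrepo, 'removed.txt' is tracked in the parent, and the file contents are illustrative):

def getfilectx(repo, mctx, path):
    if path == 'removed.txt':
        return None                      # None marks the file as removed
    return memfilectx(repo, path, 'new contents\n',
                      islink=False, isexec=False, memctx=mctx)

mctx = memctx(repo,
              parents=(repo['.'].node(), None),  # None becomes the null parent
              text='example in-memory commit',
              files=['added.txt', 'removed.txt'],
              filectxfn=getfilectx)
newnode = mctx.commit()                  # equivalent to repo.commitctx(mctx)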