filectx: allow custom comparators...
Siddharth Agarwal
r26977:bd19561b default
@@ -1,1933 +1,1937 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import re
9 9
10 10 from node import nullid, nullrev, wdirid, short, hex, bin
11 11 from i18n import _
12 12 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
13 13 import match as matchmod
14 14 import os, errno, stat
15 15 import obsolete as obsmod
16 16 import repoview
17 17 import fileset
18 18 import revlog
19 19
20 20 propertycache = util.propertycache
21 21
22 22 # Phony node value to stand in for new files in some uses of
23 23 # manifests. Manifests support 21-byte hashes for nodes which are
24 24 # dirty in the working copy.
25 25 _newnode = '!' * 21
26 26
27 27 nonascii = re.compile(r'[^\x21-\x7f]').search
28 28
29 29 class basectx(object):
30 30 """A basectx object represents the common logic for its children:
31 31 changectx: read-only context that is already present in the repo,
32 32 workingctx: a context that represents the working directory and can
33 33 be committed,
34 34 memctx: a context that represents changes in-memory and can also
35 35 be committed."""
36 36 def __new__(cls, repo, changeid='', *args, **kwargs):
37 37 if isinstance(changeid, basectx):
38 38 return changeid
39 39
40 40 o = super(basectx, cls).__new__(cls)
41 41
42 42 o._repo = repo
43 43 o._rev = nullrev
44 44 o._node = nullid
45 45
46 46 return o
47 47
48 48 def __str__(self):
49 49 return short(self.node())
50 50
51 51 def __int__(self):
52 52 return self.rev()
53 53
54 54 def __repr__(self):
55 55 return "<%s %s>" % (type(self).__name__, str(self))
56 56
57 57 def __eq__(self, other):
58 58 try:
59 59 return type(self) == type(other) and self._rev == other._rev
60 60 except AttributeError:
61 61 return False
62 62
63 63 def __ne__(self, other):
64 64 return not (self == other)
65 65
66 66 def __contains__(self, key):
67 67 return key in self._manifest
68 68
69 69 def __getitem__(self, key):
70 70 return self.filectx(key)
71 71
72 72 def __iter__(self):
73 73 return iter(self._manifest)
74 74
75 75 def _manifestmatches(self, match, s):
76 76 """generate a new manifest filtered by the match argument
77 77
78 78 This method is for internal use only and mainly exists to provide an
79 79 object oriented way for other contexts to customize the manifest
80 80 generation.
81 81 """
82 82 return self.manifest().matches(match)
83 83
84 84 def _matchstatus(self, other, match):
85 85 """return match.always if match is None
86 86
87 87 This internal method provides a way for child objects to override the
88 88 match operator.
89 89 """
90 90 return match or matchmod.always(self._repo.root, self._repo.getcwd())
91 91
92 92 def _buildstatus(self, other, s, match, listignored, listclean,
93 93 listunknown):
94 94 """build a status with respect to another context"""
95 95 # Load earliest manifest first for caching reasons. More specifically,
96 96 # if you have revisions 1000 and 1001, 1001 is probably stored as a
97 97 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
98 98 # 1000 and cache it so that when you read 1001, we just need to apply a
99 99 # delta to what's in the cache. So that's one full reconstruction + one
100 100 # delta application.
101 101 if self.rev() is not None and self.rev() < other.rev():
102 102 self.manifest()
103 103 mf1 = other._manifestmatches(match, s)
104 104 mf2 = self._manifestmatches(match, s)
105 105
106 106 modified, added = [], []
107 107 removed = []
108 108 clean = []
109 109 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
110 110 deletedset = set(deleted)
111 111 d = mf1.diff(mf2, clean=listclean)
112 112 for fn, value in d.iteritems():
113 113 if fn in deletedset:
114 114 continue
115 115 if value is None:
116 116 clean.append(fn)
117 117 continue
118 118 (node1, flag1), (node2, flag2) = value
119 119 if node1 is None:
120 120 added.append(fn)
121 121 elif node2 is None:
122 122 removed.append(fn)
123 123 elif node2 != _newnode:
124 124 # The file was not a new file in mf2, so an entry
125 125 # from diff is really a difference.
126 126 modified.append(fn)
127 127 elif self[fn].cmp(other[fn]):
128 128 # node2 was newnode, but the working file doesn't
129 129 # match the one in mf1.
130 130 modified.append(fn)
131 131 else:
132 132 clean.append(fn)
133 133
134 134 if removed:
135 135 # need to filter files if they are already reported as removed
136 136 unknown = [fn for fn in unknown if fn not in mf1]
137 137 ignored = [fn for fn in ignored if fn not in mf1]
138 138 # if they're deleted, don't report them as removed
139 139 removed = [fn for fn in removed if fn not in deletedset]
140 140
141 141 return scmutil.status(modified, added, removed, deleted, unknown,
142 142 ignored, clean)
143 143
144 144 @propertycache
145 145 def substate(self):
146 146 return subrepo.state(self, self._repo.ui)
147 147
148 148 def subrev(self, subpath):
149 149 return self.substate[subpath][1]
150 150
151 151 def rev(self):
152 152 return self._rev
153 153 def node(self):
154 154 return self._node
155 155 def hex(self):
156 156 return hex(self.node())
157 157 def manifest(self):
158 158 return self._manifest
159 159 def repo(self):
160 160 return self._repo
161 161 def phasestr(self):
162 162 return phases.phasenames[self.phase()]
163 163 def mutable(self):
164 164 return self.phase() > phases.public
165 165
166 166 def getfileset(self, expr):
167 167 return fileset.getfileset(self, expr)
168 168
169 169 def obsolete(self):
170 170 """True if the changeset is obsolete"""
171 171 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
172 172
173 173 def extinct(self):
174 174 """True if the changeset is extinct"""
175 175 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
176 176
177 177 def unstable(self):
178 178 """True if the changeset is not obsolete but its ancestors are"""
179 179 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
180 180
181 181 def bumped(self):
182 182 """True if the changeset tries to be a successor of a public changeset
183 183
184 184 Only non-public and non-obsolete changesets may be bumped.
185 185 """
186 186 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
187 187
188 188 def divergent(self):
189 189 """True if the changeset is a successor of a changeset with multiple possible successor sets
190 190
191 191 Only non-public and non-obsolete changesets may be divergent.
192 192 """
193 193 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
194 194
195 195 def troubled(self):
196 196 """True if the changeset is either unstable, bumped or divergent"""
197 197 return self.unstable() or self.bumped() or self.divergent()
198 198
199 199 def troubles(self):
200 200 """return the list of troubles affecting this changeset.
201 201
202 202 Troubles are returned as strings. Possible values are:
203 203 - unstable,
204 204 - bumped,
205 205 - divergent.
206 206 """
207 207 troubles = []
208 208 if self.unstable():
209 209 troubles.append('unstable')
210 210 if self.bumped():
211 211 troubles.append('bumped')
212 212 if self.divergent():
213 213 troubles.append('divergent')
214 214 return troubles
215 215
216 216 def parents(self):
217 217 """return contexts for each parent changeset"""
218 218 return self._parents
219 219
220 220 def p1(self):
221 221 return self._parents[0]
222 222
223 223 def p2(self):
224 224 if len(self._parents) == 2:
225 225 return self._parents[1]
226 226 return changectx(self._repo, -1)
227 227
228 228 def _fileinfo(self, path):
229 229 if '_manifest' in self.__dict__:
230 230 try:
231 231 return self._manifest[path], self._manifest.flags(path)
232 232 except KeyError:
233 233 raise error.ManifestLookupError(self._node, path,
234 234 _('not found in manifest'))
235 235 if '_manifestdelta' in self.__dict__ or path in self.files():
236 236 if path in self._manifestdelta:
237 237 return (self._manifestdelta[path],
238 238 self._manifestdelta.flags(path))
239 239 node, flag = self._repo.manifest.find(self._changeset[0], path)
240 240 if not node:
241 241 raise error.ManifestLookupError(self._node, path,
242 242 _('not found in manifest'))
243 243
244 244 return node, flag
245 245
246 246 def filenode(self, path):
247 247 return self._fileinfo(path)[0]
248 248
249 249 def flags(self, path):
250 250 try:
251 251 return self._fileinfo(path)[1]
252 252 except error.LookupError:
253 253 return ''
254 254
255 255 def sub(self, path):
256 256 '''return a subrepo for the stored revision of path, never wdir()'''
257 257 return subrepo.subrepo(self, path)
258 258
259 259 def nullsub(self, path, pctx):
260 260 return subrepo.nullsubrepo(self, path, pctx)
261 261
262 262 def workingsub(self, path):
263 263 '''return a subrepo for the stored revision, or wdir if this is a wdir
264 264 context.
265 265 '''
266 266 return subrepo.subrepo(self, path, allowwdir=True)
267 267
268 268 def match(self, pats=[], include=None, exclude=None, default='glob',
269 269 listsubrepos=False, badfn=None):
270 270 r = self._repo
271 271 return matchmod.match(r.root, r.getcwd(), pats,
272 272 include, exclude, default,
273 273 auditor=r.auditor, ctx=self,
274 274 listsubrepos=listsubrepos, badfn=badfn)
275 275
276 276 def diff(self, ctx2=None, match=None, **opts):
277 277 """Returns a diff generator for the given contexts and matcher"""
278 278 if ctx2 is None:
279 279 ctx2 = self.p1()
280 280 if ctx2 is not None:
281 281 ctx2 = self._repo[ctx2]
282 282 diffopts = patch.diffopts(self._repo.ui, opts)
283 283 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
284 284
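A minimal usage sketch of the diff() generator defined just above (not part of this changeset; it assumes the current directory is a Mercurial repository and uses only calls visible in this file):

    import sys
    from mercurial import ui as uimod, hg

    repo = hg.repository(uimod.ui(), '.')   # assumed local repository
    ctx = repo['.']                         # working directory parent
    for chunk in ctx.diff():                # ctx2 defaults to ctx.p1()
        sys.stdout.write(chunk)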
285 285 def dirs(self):
286 286 return self._manifest.dirs()
287 287
288 288 def hasdir(self, dir):
289 289 return self._manifest.hasdir(dir)
290 290
291 291 def dirty(self, missing=False, merge=True, branch=True):
292 292 return False
293 293
294 294 def status(self, other=None, match=None, listignored=False,
295 295 listclean=False, listunknown=False, listsubrepos=False):
296 296 """return status of files between two nodes, or between a node and the working
297 297 directory.
298 298
299 299 If other is None, compare this node with the working directory.
300 300
301 301 returns (modified, added, removed, deleted, unknown, ignored, clean)
302 302 """
303 303
304 304 ctx1 = self
305 305 ctx2 = self._repo[other]
306 306
307 307 # This next code block is, admittedly, fragile logic that tests for
308 308 # reversing the contexts and wouldn't need to exist if it weren't for
309 309 # the fast (and common) code path of comparing the working directory
310 310 # with its first parent.
311 311 #
312 312 # What we're aiming for here is the ability to call:
313 313 #
314 314 # workingctx.status(parentctx)
315 315 #
316 316 # If we always built the manifest for each context and compared those,
317 317 # then we'd be done. But the special case of the above call means we
318 318 # just copy the manifest of the parent.
319 319 reversed = False
320 320 if (not isinstance(ctx1, changectx)
321 321 and isinstance(ctx2, changectx)):
322 322 reversed = True
323 323 ctx1, ctx2 = ctx2, ctx1
324 324
325 325 match = ctx2._matchstatus(ctx1, match)
326 326 r = scmutil.status([], [], [], [], [], [], [])
327 327 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
328 328 listunknown)
329 329
330 330 if reversed:
331 331 # Reverse added and removed. Clear deleted, unknown and ignored as
332 332 # these make no sense to reverse.
333 333 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
334 334 r.clean)
335 335
336 336 if listsubrepos:
337 337 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
338 338 rev2 = ctx2.subrev(subpath)
339 339 try:
340 340 submatch = matchmod.narrowmatcher(subpath, match)
341 341 s = sub.status(rev2, match=submatch, ignored=listignored,
342 342 clean=listclean, unknown=listunknown,
343 343 listsubrepos=True)
344 344 for rfiles, sfiles in zip(r, s):
345 345 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
346 346 except error.LookupError:
347 347 self._repo.ui.status(_("skipping missing "
348 348 "subrepository: %s\n") % subpath)
349 349
350 350 for l in r:
351 351 l.sort()
352 352
353 353 return r
354 354
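A short sketch of calling status() as documented above; with other=None this context is compared against the working directory (the repo object is assumed to be opened as in the earlier diff() sketch):

    import sys
    # 'repo' is a localrepository opened as in the earlier sketch
    st = repo['.'].status()                 # working dir vs. its parent
    for f in st.modified:
        sys.stdout.write('M %s\n' % f)
    for f in st.added:
        sys.stdout.write('A %s\n' % f)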
355 355
356 356 def makememctx(repo, parents, text, user, date, branch, files, store,
357 357 editor=None, extra=None):
358 358 def getfilectx(repo, memctx, path):
359 359 data, mode, copied = store.getfile(path)
360 360 if data is None:
361 361 return None
362 362 islink, isexec = mode
363 363 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
364 364 copied=copied, memctx=memctx)
365 365 if extra is None:
366 366 extra = {}
367 367 if branch:
368 368 extra['branch'] = encoding.fromlocal(branch)
369 369 ctx = memctx(repo, parents, text, files, getfilectx, user,
370 370 date, extra, editor)
371 371 return ctx
372 372
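makememctx() builds an in-memory commit from a store object exposing getfile(). A hedged sketch of the same machinery driven directly through memctx/memfilectx, whose call signatures are taken from the calls above; the file name, contents, user and message are made up, and 'repo' is assumed as before:

    from mercurial.context import memctx, memfilectx

    def getfilectx(repo, mctx, path):
        # every listed file is created with fixed contents
        return memfilectx(repo, path, 'hello\n', memctx=mctx)

    mctx = memctx(repo, (repo['.'].node(), None), 'in-memory commit',
                  ['hello.txt'], getfilectx, user='someone@example.com')
    newnode = repo.commitctx(mctx)          # commits without a working copy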
373 373 class changectx(basectx):
374 374 """A changecontext object makes access to data related to a particular
375 375 changeset convenient. It represents a read-only context already present in
376 376 the repo."""
377 377 def __init__(self, repo, changeid=''):
378 378 """changeid is a revision number, node, or tag"""
379 379
380 380 # since basectx.__new__ already took care of copying the object, we
381 381 # don't need to do anything in __init__, so we just exit here
382 382 if isinstance(changeid, basectx):
383 383 return
384 384
385 385 if changeid == '':
386 386 changeid = '.'
387 387 self._repo = repo
388 388
389 389 try:
390 390 if isinstance(changeid, int):
391 391 self._node = repo.changelog.node(changeid)
392 392 self._rev = changeid
393 393 return
394 394 if isinstance(changeid, long):
395 395 changeid = str(changeid)
396 396 if changeid == 'null':
397 397 self._node = nullid
398 398 self._rev = nullrev
399 399 return
400 400 if changeid == 'tip':
401 401 self._node = repo.changelog.tip()
402 402 self._rev = repo.changelog.rev(self._node)
403 403 return
404 404 if changeid == '.' or changeid == repo.dirstate.p1():
405 405 # this is a hack to delay/avoid loading obsmarkers
406 406 # when we know that '.' won't be hidden
407 407 self._node = repo.dirstate.p1()
408 408 self._rev = repo.unfiltered().changelog.rev(self._node)
409 409 return
410 410 if len(changeid) == 20:
411 411 try:
412 412 self._node = changeid
413 413 self._rev = repo.changelog.rev(changeid)
414 414 return
415 415 except error.FilteredRepoLookupError:
416 416 raise
417 417 except LookupError:
418 418 pass
419 419
420 420 try:
421 421 r = int(changeid)
422 422 if str(r) != changeid:
423 423 raise ValueError
424 424 l = len(repo.changelog)
425 425 if r < 0:
426 426 r += l
427 427 if r < 0 or r >= l:
428 428 raise ValueError
429 429 self._rev = r
430 430 self._node = repo.changelog.node(r)
431 431 return
432 432 except error.FilteredIndexError:
433 433 raise
434 434 except (ValueError, OverflowError, IndexError):
435 435 pass
436 436
437 437 if len(changeid) == 40:
438 438 try:
439 439 self._node = bin(changeid)
440 440 self._rev = repo.changelog.rev(self._node)
441 441 return
442 442 except error.FilteredLookupError:
443 443 raise
444 444 except (TypeError, LookupError):
445 445 pass
446 446
447 447 # lookup bookmarks through the name interface
448 448 try:
449 449 self._node = repo.names.singlenode(repo, changeid)
450 450 self._rev = repo.changelog.rev(self._node)
451 451 return
452 452 except KeyError:
453 453 pass
454 454 except error.FilteredRepoLookupError:
455 455 raise
456 456 except error.RepoLookupError:
457 457 pass
458 458
459 459 self._node = repo.unfiltered().changelog._partialmatch(changeid)
460 460 if self._node is not None:
461 461 self._rev = repo.changelog.rev(self._node)
462 462 return
463 463
464 464 # lookup failed
465 465 # check if it might have come from a damaged dirstate
466 466 #
467 467 # XXX we could avoid the unfiltered if we had a recognizable
468 468 # exception for filtered changeset access
469 469 if changeid in repo.unfiltered().dirstate.parents():
470 470 msg = _("working directory has unknown parent '%s'!")
471 471 raise error.Abort(msg % short(changeid))
472 472 try:
473 473 if len(changeid) == 20 and nonascii(changeid):
474 474 changeid = hex(changeid)
475 475 except TypeError:
476 476 pass
477 477 except (error.FilteredIndexError, error.FilteredLookupError,
478 478 error.FilteredRepoLookupError):
479 479 if repo.filtername.startswith('visible'):
480 480 msg = _("hidden revision '%s'") % changeid
481 481 hint = _('use --hidden to access hidden revisions')
482 482 raise error.FilteredRepoLookupError(msg, hint=hint)
483 483 msg = _("filtered revision '%s' (not in '%s' subset)")
484 484 msg %= (changeid, repo.filtername)
485 485 raise error.FilteredRepoLookupError(msg)
486 486 except IndexError:
487 487 pass
488 488 raise error.RepoLookupError(
489 489 _("unknown revision '%s'") % changeid)
490 490
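For reference, the lookup branches above accept several changeid forms; a short sketch (the hex prefix shown is this changeset's own and is only illustrative, 'repo' assumed as before):

    ctx_by_rev    = repo[0]              # integer revision number
    ctx_by_name   = repo['tip']          # symbolic name / tag / bookmark
    ctx_by_prefix = repo['bd19561b']     # hex prefix, via _partialmatch
    ctx_null      = repo['null']         # the null revision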
491 491 def __hash__(self):
492 492 try:
493 493 return hash(self._rev)
494 494 except AttributeError:
495 495 return id(self)
496 496
497 497 def __nonzero__(self):
498 498 return self._rev != nullrev
499 499
500 500 @propertycache
501 501 def _changeset(self):
502 502 return self._repo.changelog.read(self.rev())
503 503
504 504 @propertycache
505 505 def _manifest(self):
506 506 return self._repo.manifest.read(self._changeset[0])
507 507
508 508 @propertycache
509 509 def _manifestdelta(self):
510 510 return self._repo.manifest.readdelta(self._changeset[0])
511 511
512 512 @propertycache
513 513 def _parents(self):
514 514 p = self._repo.changelog.parentrevs(self._rev)
515 515 if p[1] == nullrev:
516 516 p = p[:-1]
517 517 return [changectx(self._repo, x) for x in p]
518 518
519 519 def changeset(self):
520 520 return self._changeset
521 521 def manifestnode(self):
522 522 return self._changeset[0]
523 523
524 524 def user(self):
525 525 return self._changeset[1]
526 526 def date(self):
527 527 return self._changeset[2]
528 528 def files(self):
529 529 return self._changeset[3]
530 530 def description(self):
531 531 return self._changeset[4]
532 532 def branch(self):
533 533 return encoding.tolocal(self._changeset[5].get("branch"))
534 534 def closesbranch(self):
535 535 return 'close' in self._changeset[5]
536 536 def extra(self):
537 537 return self._changeset[5]
538 538 def tags(self):
539 539 return self._repo.nodetags(self._node)
540 540 def bookmarks(self):
541 541 return self._repo.nodebookmarks(self._node)
542 542 def phase(self):
543 543 return self._repo._phasecache.phase(self._repo, self._rev)
544 544 def hidden(self):
545 545 return self._rev in repoview.filterrevs(self._repo, 'visible')
546 546
547 547 def children(self):
548 548 """return contexts for each child changeset"""
549 549 c = self._repo.changelog.children(self._node)
550 550 return [changectx(self._repo, x) for x in c]
551 551
552 552 def ancestors(self):
553 553 for a in self._repo.changelog.ancestors([self._rev]):
554 554 yield changectx(self._repo, a)
555 555
556 556 def descendants(self):
557 557 for d in self._repo.changelog.descendants([self._rev]):
558 558 yield changectx(self._repo, d)
559 559
560 560 def filectx(self, path, fileid=None, filelog=None):
561 561 """get a file context from this changeset"""
562 562 if fileid is None:
563 563 fileid = self.filenode(path)
564 564 return filectx(self._repo, path, fileid=fileid,
565 565 changectx=self, filelog=filelog)
566 566
567 567 def ancestor(self, c2, warn=False):
568 568 """return the "best" ancestor context of self and c2
569 569
570 570 If there are multiple candidates, it will show a message and check
571 571 merge.preferancestor configuration before falling back to the
572 572 revlog ancestor."""
573 573 # deal with workingctxs
574 574 n2 = c2._node
575 575 if n2 is None:
576 576 n2 = c2._parents[0]._node
577 577 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
578 578 if not cahs:
579 579 anc = nullid
580 580 elif len(cahs) == 1:
581 581 anc = cahs[0]
582 582 else:
583 583 # experimental config: merge.preferancestor
584 584 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
585 585 try:
586 586 ctx = changectx(self._repo, r)
587 587 except error.RepoLookupError:
588 588 continue
589 589 anc = ctx.node()
590 590 if anc in cahs:
591 591 break
592 592 else:
593 593 anc = self._repo.changelog.ancestor(self._node, n2)
594 594 if warn:
595 595 self._repo.ui.status(
596 596 (_("note: using %s as ancestor of %s and %s\n") %
597 597 (short(anc), short(self._node), short(n2))) +
598 598 ''.join(_(" alternatively, use --config "
599 599 "merge.preferancestor=%s\n") %
600 600 short(n) for n in sorted(cahs) if n != anc))
601 601 return changectx(self._repo, anc)
602 602
603 603 def descendant(self, other):
604 604 """True if other is descendant of this changeset"""
605 605 return self._repo.changelog.descendant(self._rev, other._rev)
606 606
607 607 def walk(self, match):
608 608 '''Generates matching file names.'''
609 609
610 610 # Wrap match.bad method to have message with nodeid
611 611 def bad(fn, msg):
612 612 # The manifest doesn't know about subrepos, so don't complain about
613 613 # paths into valid subrepos.
614 614 if any(fn == s or fn.startswith(s + '/')
615 615 for s in self.substate):
616 616 return
617 617 match.bad(fn, _('no such file in rev %s') % self)
618 618
619 619 m = matchmod.badmatch(match, bad)
620 620 return self._manifest.walk(m)
621 621
622 622 def matches(self, match):
623 623 return self.walk(match)
624 624
625 625 class basefilectx(object):
626 626 """A filecontext object represents the common logic for its children:
627 627 filectx: read-only access to a filerevision that is already present
628 628 in the repo,
629 629 workingfilectx: a filecontext that represents files from the working
630 630 directory,
631 631 memfilectx: a filecontext that represents files in-memory."""
632 632 def __new__(cls, repo, path, *args, **kwargs):
633 633 return super(basefilectx, cls).__new__(cls)
634 634
635 635 @propertycache
636 636 def _filelog(self):
637 637 return self._repo.file(self._path)
638 638
639 639 @propertycache
640 640 def _changeid(self):
641 641 if '_changeid' in self.__dict__:
642 642 return self._changeid
643 643 elif '_changectx' in self.__dict__:
644 644 return self._changectx.rev()
645 645 elif '_descendantrev' in self.__dict__:
646 646 # this file context was created from a revision with a known
647 647 # descendant, we can (lazily) correct for linkrev aliases
648 648 return self._adjustlinkrev(self._path, self._filelog,
649 649 self._filenode, self._descendantrev)
650 650 else:
651 651 return self._filelog.linkrev(self._filerev)
652 652
653 653 @propertycache
654 654 def _filenode(self):
655 655 if '_fileid' in self.__dict__:
656 656 return self._filelog.lookup(self._fileid)
657 657 else:
658 658 return self._changectx.filenode(self._path)
659 659
660 660 @propertycache
661 661 def _filerev(self):
662 662 return self._filelog.rev(self._filenode)
663 663
664 664 @propertycache
665 665 def _repopath(self):
666 666 return self._path
667 667
668 668 def __nonzero__(self):
669 669 try:
670 670 self._filenode
671 671 return True
672 672 except error.LookupError:
673 673 # file is missing
674 674 return False
675 675
676 676 def __str__(self):
677 677 return "%s@%s" % (self.path(), self._changectx)
678 678
679 679 def __repr__(self):
680 680 return "<%s %s>" % (type(self).__name__, str(self))
681 681
682 682 def __hash__(self):
683 683 try:
684 684 return hash((self._path, self._filenode))
685 685 except AttributeError:
686 686 return id(self)
687 687
688 688 def __eq__(self, other):
689 689 try:
690 690 return (type(self) == type(other) and self._path == other._path
691 691 and self._filenode == other._filenode)
692 692 except AttributeError:
693 693 return False
694 694
695 695 def __ne__(self, other):
696 696 return not (self == other)
697 697
698 698 def filerev(self):
699 699 return self._filerev
700 700 def filenode(self):
701 701 return self._filenode
702 702 def flags(self):
703 703 return self._changectx.flags(self._path)
704 704 def filelog(self):
705 705 return self._filelog
706 706 def rev(self):
707 707 return self._changeid
708 708 def linkrev(self):
709 709 return self._filelog.linkrev(self._filerev)
710 710 def node(self):
711 711 return self._changectx.node()
712 712 def hex(self):
713 713 return self._changectx.hex()
714 714 def user(self):
715 715 return self._changectx.user()
716 716 def date(self):
717 717 return self._changectx.date()
718 718 def files(self):
719 719 return self._changectx.files()
720 720 def description(self):
721 721 return self._changectx.description()
722 722 def branch(self):
723 723 return self._changectx.branch()
724 724 def extra(self):
725 725 return self._changectx.extra()
726 726 def phase(self):
727 727 return self._changectx.phase()
728 728 def phasestr(self):
729 729 return self._changectx.phasestr()
730 730 def manifest(self):
731 731 return self._changectx.manifest()
732 732 def changectx(self):
733 733 return self._changectx
734 734 def repo(self):
735 735 return self._repo
736 736
737 737 def path(self):
738 738 return self._path
739 739
740 740 def isbinary(self):
741 741 try:
742 742 return util.binary(self.data())
743 743 except IOError:
744 744 return False
745 745 def isexec(self):
746 746 return 'x' in self.flags()
747 747 def islink(self):
748 748 return 'l' in self.flags()
749 749
750 _customcmp = False
750 751 def cmp(self, fctx):
751 752 """compare with other file context
752 753
753 754 returns True if different than fctx.
754 755 """
756 if fctx._customcmp:
757 return fctx.cmp(self)
758
755 759 if (fctx._filerev is None
756 760 and (self._repo._encodefilterpats
757 761 # if file data starts with '\1\n', empty metadata block is
758 762 # prepended, which adds 4 bytes to filelog.size().
759 763 or self.size() - 4 == fctx.size())
760 764 or self.size() == fctx.size()):
761 765 return self._filelog.cmp(self._filenode, fctx.data())
762 766
763 767 return True
764 768
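The added lines above are the substance of this changeset: when the other file context sets _customcmp, cmp() delegates the comparison to it. A hedged sketch of a subclass that opts in (the class name and its hashing strategy are invented for illustration; only the _customcmp/cmp hook comes from the patch):

    import hashlib

    class hashedfilectx(filectx):
        # opting in: plainfctx.cmp(hashedfctx) now calls hashedfctx.cmp(plainfctx)
        _customcmp = True

        def cmp(self, fctx):
            # same contract as basefilectx.cmp: True means "different"
            return (hashlib.sha1(self.data()).digest()
                    != hashlib.sha1(fctx.data()).digest())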
765 769 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
766 770 """return the first ancestor of <srcrev> introducing <fnode>
767 771
768 772 If the linkrev of the file revision does not point to an ancestor of
769 773 srcrev, we'll walk down the ancestors until we find one introducing
770 774 this file revision.
771 775
772 776 :repo: a localrepository object (used to access changelog and manifest)
773 777 :path: the file path
774 778 :fnode: the nodeid of the file revision
775 779 :filelog: the filelog of this path
776 780 :srcrev: the changeset revision we search ancestors from
777 781 :inclusive: if true, the src revision will also be checked
778 782 """
779 783 repo = self._repo
780 784 cl = repo.unfiltered().changelog
781 785 ma = repo.manifest
782 786 # fetch the linkrev
783 787 fr = filelog.rev(fnode)
784 788 lkr = filelog.linkrev(fr)
785 789 # hack to reuse ancestor computation when searching for renames
786 790 memberanc = getattr(self, '_ancestrycontext', None)
787 791 iteranc = None
788 792 if srcrev is None:
789 793 # wctx case, used by workingfilectx during mergecopy
790 794 revs = [p.rev() for p in self._repo[None].parents()]
791 795 inclusive = True # we skipped the real (revless) source
792 796 else:
793 797 revs = [srcrev]
794 798 if memberanc is None:
795 799 memberanc = iteranc = cl.ancestors(revs, lkr,
796 800 inclusive=inclusive)
797 801 # check if this linkrev is an ancestor of srcrev
798 802 if lkr not in memberanc:
799 803 if iteranc is None:
800 804 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
801 805 for a in iteranc:
802 806 ac = cl.read(a) # get changeset data (we avoid object creation)
803 807 if path in ac[3]: # checking the 'files' field.
804 808 # The file has been touched, check if the content is
805 809 # similar to the one we search for.
806 810 if fnode == ma.readfast(ac[0]).get(path):
807 811 return a
808 812 # In theory, we should never get out of that loop without a result.
809 813 # But if the manifest uses a buggy file revision (not a child of the
810 814 # one it replaces) we could. Such a buggy situation will likely
811 815 # result in a crash somewhere else at some point.
812 816 return lkr
813 817
814 818 def introrev(self):
815 819 """return the rev of the changeset which introduced this file revision
816 820
817 821 This method is different from linkrev because it takes into account the
818 822 changeset the filectx was created from. It ensures the returned
819 823 revision is one of its ancestors. This prevents bugs from
820 824 'linkrev-shadowing' when a file revision is used by multiple
821 825 changesets.
822 826 """
823 827 lkr = self.linkrev()
824 828 attrs = vars(self)
825 829 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
826 830 if noctx or self.rev() == lkr:
827 831 return self.linkrev()
828 832 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
829 833 self.rev(), inclusive=True)
830 834
831 835 def _parentfilectx(self, path, fileid, filelog):
832 836 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
833 837 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
834 838 if '_changeid' in vars(self) or '_changectx' in vars(self):
835 839 # If self is associated with a changeset (probably explicitly
836 840 # fed), ensure the created filectx is associated with a
837 841 # changeset that is an ancestor of self.changectx.
838 842 # This lets us later use _adjustlinkrev to get a correct link.
839 843 fctx._descendantrev = self.rev()
840 844 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
841 845 elif '_descendantrev' in vars(self):
842 846 # Otherwise propagate _descendantrev if we have one associated.
843 847 fctx._descendantrev = self._descendantrev
844 848 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
845 849 return fctx
846 850
847 851 def parents(self):
848 852 _path = self._path
849 853 fl = self._filelog
850 854 parents = self._filelog.parents(self._filenode)
851 855 pl = [(_path, node, fl) for node in parents if node != nullid]
852 856
853 857 r = fl.renamed(self._filenode)
854 858 if r:
855 859 # - In the simple rename case, both parents are nullid and pl is empty.
856 860 # - In case of merge, only one of the parents is nullid and should
857 861 # be replaced with the rename information. This parent is -always-
858 862 # the first one.
859 863 #
860 864 # As nullid parents have always been filtered out in the previous
861 865 # list comprehension, inserting at 0 will always result in replacing
862 866 # the first nullid parent with the rename information.
863 867 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
864 868
865 869 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
866 870
867 871 def p1(self):
868 872 return self.parents()[0]
869 873
870 874 def p2(self):
871 875 p = self.parents()
872 876 if len(p) == 2:
873 877 return p[1]
874 878 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
875 879
876 880 def annotate(self, follow=False, linenumber=None, diffopts=None):
877 881 '''returns a list of tuples of (ctx, line) for each line
878 882 in the file, where ctx is the filectx of the node where
879 883 that line was last changed.
880 884 This returns tuples of ((ctx, linenumber), line) for each line,
881 885 if "linenumber" parameter is NOT "None".
882 886 In such tuples, the line number refers to the line's first appearance
883 887 in the managed file.
884 888 To reduce annotation cost,
885 889 this returns a fixed value (False) as the line number,
886 890 if the "linenumber" parameter is "False".'''
887 891
888 892 if linenumber is None:
889 893 def decorate(text, rev):
890 894 return ([rev] * len(text.splitlines()), text)
891 895 elif linenumber:
892 896 def decorate(text, rev):
893 897 size = len(text.splitlines())
894 898 return ([(rev, i) for i in xrange(1, size + 1)], text)
895 899 else:
896 900 def decorate(text, rev):
897 901 return ([(rev, False)] * len(text.splitlines()), text)
898 902
899 903 def pair(parent, child):
900 904 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
901 905 refine=True)
902 906 for (a1, a2, b1, b2), t in blocks:
903 907 # Changed blocks ('!') or blocks made only of blank lines ('~')
904 908 # belong to the child.
905 909 if t == '=':
906 910 child[0][b1:b2] = parent[0][a1:a2]
907 911 return child
908 912
909 913 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
910 914
911 915 def parents(f):
912 916 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
913 917 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
914 918 # from the topmost introrev (= srcrev) down to p.linkrev() if it
915 919 # isn't an ancestor of the srcrev.
916 920 f._changeid
917 921 pl = f.parents()
918 922
919 923 # Don't return renamed parents if we aren't following.
920 924 if not follow:
921 925 pl = [p for p in pl if p.path() == f.path()]
922 926
923 927 # renamed filectx won't have a filelog yet, so set it
924 928 # from the cache to save time
925 929 for p in pl:
926 930 if not '_filelog' in p.__dict__:
927 931 p._filelog = getlog(p.path())
928 932
929 933 return pl
930 934
931 935 # use linkrev to find the first changeset where self appeared
932 936 base = self
933 937 introrev = self.introrev()
934 938 if self.rev() != introrev:
935 939 base = self.filectx(self.filenode(), changeid=introrev)
936 940 if getattr(base, '_ancestrycontext', None) is None:
937 941 cl = self._repo.changelog
938 942 if introrev is None:
939 943 # wctx is not inclusive, but works because _ancestrycontext
940 944 # is used to test filelog revisions
941 945 ac = cl.ancestors([p.rev() for p in base.parents()],
942 946 inclusive=True)
943 947 else:
944 948 ac = cl.ancestors([introrev], inclusive=True)
945 949 base._ancestrycontext = ac
946 950
947 951 # This algorithm would prefer to be recursive, but Python is a
948 952 # bit recursion-hostile. Instead we do an iterative
949 953 # depth-first search.
950 954
951 955 visit = [base]
952 956 hist = {}
953 957 pcache = {}
954 958 needed = {base: 1}
955 959 while visit:
956 960 f = visit[-1]
957 961 pcached = f in pcache
958 962 if not pcached:
959 963 pcache[f] = parents(f)
960 964
961 965 ready = True
962 966 pl = pcache[f]
963 967 for p in pl:
964 968 if p not in hist:
965 969 ready = False
966 970 visit.append(p)
967 971 if not pcached:
968 972 needed[p] = needed.get(p, 0) + 1
969 973 if ready:
970 974 visit.pop()
971 975 reusable = f in hist
972 976 if reusable:
973 977 curr = hist[f]
974 978 else:
975 979 curr = decorate(f.data(), f)
976 980 for p in pl:
977 981 if not reusable:
978 982 curr = pair(hist[p], curr)
979 983 if needed[p] == 1:
980 984 del hist[p]
981 985 del needed[p]
982 986 else:
983 987 needed[p] -= 1
984 988
985 989 hist[f] = curr
986 990 pcache[f] = []
987 991
988 992 return zip(hist[base][0], hist[base][1].splitlines(True))
989 993
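A short sketch of consuming annotate() as documented above (assumes 'repo' opened as in the earlier sketch and that 'README' is a tracked file in the current revision; both are assumptions):

    import sys
    # 'repo' is a localrepository opened as in the earlier sketch
    fctx = repo['.']['README']
    for actx, line in fctx.annotate(follow=True):
        sys.stdout.write('%4d: %s' % (actx.rev(), line))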
990 994 def ancestors(self, followfirst=False):
991 995 visit = {}
992 996 c = self
993 997 if followfirst:
994 998 cut = 1
995 999 else:
996 1000 cut = None
997 1001
998 1002 while True:
999 1003 for parent in c.parents()[:cut]:
1000 1004 visit[(parent.linkrev(), parent.filenode())] = parent
1001 1005 if not visit:
1002 1006 break
1003 1007 c = visit.pop(max(visit))
1004 1008 yield c
1005 1009
1006 1010 class filectx(basefilectx):
1007 1011 """A filecontext object makes access to data related to a particular
1008 1012 filerevision convenient."""
1009 1013 def __init__(self, repo, path, changeid=None, fileid=None,
1010 1014 filelog=None, changectx=None):
1011 1015 """changeid can be a changeset revision, node, or tag.
1012 1016 fileid can be a file revision or node."""
1013 1017 self._repo = repo
1014 1018 self._path = path
1015 1019
1016 1020 assert (changeid is not None
1017 1021 or fileid is not None
1018 1022 or changectx is not None), \
1019 1023 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1020 1024 % (changeid, fileid, changectx))
1021 1025
1022 1026 if filelog is not None:
1023 1027 self._filelog = filelog
1024 1028
1025 1029 if changeid is not None:
1026 1030 self._changeid = changeid
1027 1031 if changectx is not None:
1028 1032 self._changectx = changectx
1029 1033 if fileid is not None:
1030 1034 self._fileid = fileid
1031 1035
1032 1036 @propertycache
1033 1037 def _changectx(self):
1034 1038 try:
1035 1039 return changectx(self._repo, self._changeid)
1036 1040 except error.FilteredRepoLookupError:
1037 1041 # Linkrev may point to any revision in the repository. When the
1038 1042 # repository is filtered this may lead to `filectx` trying to build
1039 1043 # `changectx` for a filtered revision. In such a case we fall back to
1040 1044 # creating `changectx` on the unfiltered version of the repository.
1041 1045 # This fallback should not be an issue because `changectx` objects from
1042 1046 # `filectx` are not used in complex operations that care about
1043 1047 # filtering.
1044 1048 #
1045 1049 # This fallback is a cheap and dirty fix that prevents several
1046 1050 # crashes. It does not ensure the behavior is correct. However the
1047 1051 # behavior was not correct before filtering either and "incorrect
1048 1052 # behavior" is seen as better than "crash"
1049 1053 #
1050 1054 # Linkrevs have several serious troubles with filtering that are
1051 1055 # complicated to solve. Proper handling of the issue here should be
1052 1056 # considered when solving the linkrev issues is on the table.
1053 1057 return changectx(self._repo.unfiltered(), self._changeid)
1054 1058
1055 1059 def filectx(self, fileid, changeid=None):
1056 1060 '''opens an arbitrary revision of the file without
1057 1061 opening a new filelog'''
1058 1062 return filectx(self._repo, self._path, fileid=fileid,
1059 1063 filelog=self._filelog, changeid=changeid)
1060 1064
1061 1065 def data(self):
1062 1066 try:
1063 1067 return self._filelog.read(self._filenode)
1064 1068 except error.CensoredNodeError:
1065 1069 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1066 1070 return ""
1067 1071 raise error.Abort(_("censored node: %s") % short(self._filenode),
1068 1072 hint=_("set censor.policy to ignore errors"))
1069 1073
1070 1074 def size(self):
1071 1075 return self._filelog.size(self._filerev)
1072 1076
1073 1077 def renamed(self):
1074 1078 """check if file was actually renamed in this changeset revision
1075 1079 If the rename is logged in the file revision, we report a copy for the changeset only
1076 1080 if the file revision's linkrev points back to the changeset in question
1077 1081 or both changeset parents contain different file revisions.
1078 1082 or both changeset parents contain different file revisions.
1079 1083 """
1080 1084
1081 1085 renamed = self._filelog.renamed(self._filenode)
1082 1086 if not renamed:
1083 1087 return renamed
1084 1088
1085 1089 if self.rev() == self.linkrev():
1086 1090 return renamed
1087 1091
1088 1092 name = self.path()
1089 1093 fnode = self._filenode
1090 1094 for p in self._changectx.parents():
1091 1095 try:
1092 1096 if fnode == p.filenode(name):
1093 1097 return None
1094 1098 except error.LookupError:
1095 1099 pass
1096 1100 return renamed
1097 1101
1098 1102 def children(self):
1099 1103 # hard for renames
1100 1104 c = self._filelog.children(self._filenode)
1101 1105 return [filectx(self._repo, self._path, fileid=x,
1102 1106 filelog=self._filelog) for x in c]
1103 1107
1104 1108 class committablectx(basectx):
1105 1109 """A committablectx object provides common functionality for a context that
1106 1110 wants the ability to commit, e.g. workingctx or memctx."""
1107 1111 def __init__(self, repo, text="", user=None, date=None, extra=None,
1108 1112 changes=None):
1109 1113 self._repo = repo
1110 1114 self._rev = None
1111 1115 self._node = None
1112 1116 self._text = text
1113 1117 if date:
1114 1118 self._date = util.parsedate(date)
1115 1119 if user:
1116 1120 self._user = user
1117 1121 if changes:
1118 1122 self._status = changes
1119 1123
1120 1124 self._extra = {}
1121 1125 if extra:
1122 1126 self._extra = extra.copy()
1123 1127 if 'branch' not in self._extra:
1124 1128 try:
1125 1129 branch = encoding.fromlocal(self._repo.dirstate.branch())
1126 1130 except UnicodeDecodeError:
1127 1131 raise error.Abort(_('branch name not in UTF-8!'))
1128 1132 self._extra['branch'] = branch
1129 1133 if self._extra['branch'] == '':
1130 1134 self._extra['branch'] = 'default'
1131 1135
1132 1136 def __str__(self):
1133 1137 return str(self._parents[0]) + "+"
1134 1138
1135 1139 def __nonzero__(self):
1136 1140 return True
1137 1141
1138 1142 def _buildflagfunc(self):
1139 1143 # Create a fallback function for getting file flags when the
1140 1144 # filesystem doesn't support them
1141 1145
1142 1146 copiesget = self._repo.dirstate.copies().get
1143 1147
1144 1148 if len(self._parents) < 2:
1145 1149 # when we have one parent, it's easy: copy from parent
1146 1150 man = self._parents[0].manifest()
1147 1151 def func(f):
1148 1152 f = copiesget(f, f)
1149 1153 return man.flags(f)
1150 1154 else:
1151 1155 # merges are tricky: we try to reconstruct the unstored
1152 1156 # result from the merge (issue1802)
1153 1157 p1, p2 = self._parents
1154 1158 pa = p1.ancestor(p2)
1155 1159 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1156 1160
1157 1161 def func(f):
1158 1162 f = copiesget(f, f) # may be wrong for merges with copies
1159 1163 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1160 1164 if fl1 == fl2:
1161 1165 return fl1
1162 1166 if fl1 == fla:
1163 1167 return fl2
1164 1168 if fl2 == fla:
1165 1169 return fl1
1166 1170 return '' # punt for conflicts
1167 1171
1168 1172 return func
1169 1173
1170 1174 @propertycache
1171 1175 def _flagfunc(self):
1172 1176 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1173 1177
1174 1178 @propertycache
1175 1179 def _manifest(self):
1176 1180 """generate a manifest corresponding to the values in self._status
1177 1181
1178 1182 This reuses the file nodeid from the parent, but appends an extra letter
1179 1183 when modified. Modified files get an extra 'm' while added files get
1180 1184 an extra 'a'. This is used by manifest merge to see that files
1181 1185 are different and by the update logic to avoid deleting newly added files.
1182 1186 """
1183 1187
1184 1188 man1 = self._parents[0].manifest()
1185 1189 man = man1.copy()
1186 1190 if len(self._parents) > 1:
1187 1191 man2 = self.p2().manifest()
1188 1192 def getman(f):
1189 1193 if f in man1:
1190 1194 return man1
1191 1195 return man2
1192 1196 else:
1193 1197 getman = lambda f: man1
1194 1198
1195 1199 copied = self._repo.dirstate.copies()
1196 1200 ff = self._flagfunc
1197 1201 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1198 1202 for f in l:
1199 1203 orig = copied.get(f, f)
1200 1204 man[f] = getman(orig).get(orig, nullid) + i
1201 1205 try:
1202 1206 man.setflag(f, ff(f))
1203 1207 except OSError:
1204 1208 pass
1205 1209
1206 1210 for f in self._status.deleted + self._status.removed:
1207 1211 if f in man:
1208 1212 del man[f]
1209 1213
1210 1214 return man
1211 1215
1212 1216 @propertycache
1213 1217 def _status(self):
1214 1218 return self._repo.status()
1215 1219
1216 1220 @propertycache
1217 1221 def _user(self):
1218 1222 return self._repo.ui.username()
1219 1223
1220 1224 @propertycache
1221 1225 def _date(self):
1222 1226 return util.makedate()
1223 1227
1224 1228 def subrev(self, subpath):
1225 1229 return None
1226 1230
1227 1231 def manifestnode(self):
1228 1232 return None
1229 1233 def user(self):
1230 1234 return self._user or self._repo.ui.username()
1231 1235 def date(self):
1232 1236 return self._date
1233 1237 def description(self):
1234 1238 return self._text
1235 1239 def files(self):
1236 1240 return sorted(self._status.modified + self._status.added +
1237 1241 self._status.removed)
1238 1242
1239 1243 def modified(self):
1240 1244 return self._status.modified
1241 1245 def added(self):
1242 1246 return self._status.added
1243 1247 def removed(self):
1244 1248 return self._status.removed
1245 1249 def deleted(self):
1246 1250 return self._status.deleted
1247 1251 def branch(self):
1248 1252 return encoding.tolocal(self._extra['branch'])
1249 1253 def closesbranch(self):
1250 1254 return 'close' in self._extra
1251 1255 def extra(self):
1252 1256 return self._extra
1253 1257
1254 1258 def tags(self):
1255 1259 return []
1256 1260
1257 1261 def bookmarks(self):
1258 1262 b = []
1259 1263 for p in self.parents():
1260 1264 b.extend(p.bookmarks())
1261 1265 return b
1262 1266
1263 1267 def phase(self):
1264 1268 phase = phases.draft # default phase to draft
1265 1269 for p in self.parents():
1266 1270 phase = max(phase, p.phase())
1267 1271 return phase
1268 1272
1269 1273 def hidden(self):
1270 1274 return False
1271 1275
1272 1276 def children(self):
1273 1277 return []
1274 1278
1275 1279 def flags(self, path):
1276 1280 if '_manifest' in self.__dict__:
1277 1281 try:
1278 1282 return self._manifest.flags(path)
1279 1283 except KeyError:
1280 1284 return ''
1281 1285
1282 1286 try:
1283 1287 return self._flagfunc(path)
1284 1288 except OSError:
1285 1289 return ''
1286 1290
1287 1291 def ancestor(self, c2):
1288 1292 """return the "best" ancestor context of self and c2"""
1289 1293 return self._parents[0].ancestor(c2) # punt on two parents for now
1290 1294
1291 1295 def walk(self, match):
1292 1296 '''Generates matching file names.'''
1293 1297 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1294 1298 True, False))
1295 1299
1296 1300 def matches(self, match):
1297 1301 return sorted(self._repo.dirstate.matches(match))
1298 1302
1299 1303 def ancestors(self):
1300 1304 for p in self._parents:
1301 1305 yield p
1302 1306 for a in self._repo.changelog.ancestors(
1303 1307 [p.rev() for p in self._parents]):
1304 1308 yield changectx(self._repo, a)
1305 1309
1306 1310 def markcommitted(self, node):
1307 1311 """Perform post-commit cleanup necessary after committing this ctx
1308 1312
1309 1313 Specifically, this updates backing stores this working context
1310 1314 wraps to reflect the fact that the changes reflected by this
1311 1315 workingctx have been committed. For example, it marks
1312 1316 modified and added files as normal in the dirstate.
1313 1317
1314 1318 """
1315 1319
1316 1320 self._repo.dirstate.beginparentchange()
1317 1321 for f in self.modified() + self.added():
1318 1322 self._repo.dirstate.normal(f)
1319 1323 for f in self.removed():
1320 1324 self._repo.dirstate.drop(f)
1321 1325 self._repo.dirstate.setparents(node)
1322 1326 self._repo.dirstate.endparentchange()
1323 1327
1324 1328 # write changes out explicitly, because nesting wlock at
1325 1329 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1326 1330 # from immediately doing so for subsequent changing files
1327 1331 self._repo.dirstate.write(self._repo.currenttransaction())
1328 1332
1329 1333 class workingctx(committablectx):
1330 1334 """A workingctx object makes access to data related to
1331 1335 the current working directory convenient.
1332 1336 date - any valid date string or (unixtime, offset), or None.
1333 1337 user - username string, or None.
1334 1338 extra - a dictionary of extra values, or None.
1335 1339 changes - a list of file lists as returned by localrepo.status()
1336 1340 or None to use the repository status.
1337 1341 """
1338 1342 def __init__(self, repo, text="", user=None, date=None, extra=None,
1339 1343 changes=None):
1340 1344 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1341 1345
1342 1346 def __iter__(self):
1343 1347 d = self._repo.dirstate
1344 1348 for f in d:
1345 1349 if d[f] != 'r':
1346 1350 yield f
1347 1351
1348 1352 def __contains__(self, key):
1349 1353 return self._repo.dirstate[key] not in "?r"
1350 1354
1351 1355 def hex(self):
1352 1356 return hex(wdirid)
1353 1357
1354 1358 @propertycache
1355 1359 def _parents(self):
1356 1360 p = self._repo.dirstate.parents()
1357 1361 if p[1] == nullid:
1358 1362 p = p[:-1]
1359 1363 return [changectx(self._repo, x) for x in p]
1360 1364
1361 1365 def filectx(self, path, filelog=None):
1362 1366 """get a file context from the working directory"""
1363 1367 return workingfilectx(self._repo, path, workingctx=self,
1364 1368 filelog=filelog)
1365 1369
1366 1370 def dirty(self, missing=False, merge=True, branch=True):
1367 1371 "check whether a working directory is modified"
1368 1372 # check subrepos first
1369 1373 for s in sorted(self.substate):
1370 1374 if self.sub(s).dirty():
1371 1375 return True
1372 1376 # check current working dir
1373 1377 return ((merge and self.p2()) or
1374 1378 (branch and self.branch() != self.p1().branch()) or
1375 1379 self.modified() or self.added() or self.removed() or
1376 1380 (missing and self.deleted()))
1377 1381
1378 1382 def add(self, list, prefix=""):
1379 1383 join = lambda f: os.path.join(prefix, f)
1380 1384 wlock = self._repo.wlock()
1381 1385 ui, ds = self._repo.ui, self._repo.dirstate
1382 1386 try:
1383 1387 rejected = []
1384 1388 lstat = self._repo.wvfs.lstat
1385 1389 for f in list:
1386 1390 scmutil.checkportable(ui, join(f))
1387 1391 try:
1388 1392 st = lstat(f)
1389 1393 except OSError:
1390 1394 ui.warn(_("%s does not exist!\n") % join(f))
1391 1395 rejected.append(f)
1392 1396 continue
1393 1397 if st.st_size > 10000000:
1394 1398 ui.warn(_("%s: up to %d MB of RAM may be required "
1395 1399 "to manage this file\n"
1396 1400 "(use 'hg revert %s' to cancel the "
1397 1401 "pending addition)\n")
1398 1402 % (f, 3 * st.st_size // 1000000, join(f)))
1399 1403 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1400 1404 ui.warn(_("%s not added: only files and symlinks "
1401 1405 "supported currently\n") % join(f))
1402 1406 rejected.append(f)
1403 1407 elif ds[f] in 'amn':
1404 1408 ui.warn(_("%s already tracked!\n") % join(f))
1405 1409 elif ds[f] == 'r':
1406 1410 ds.normallookup(f)
1407 1411 else:
1408 1412 ds.add(f)
1409 1413 return rejected
1410 1414 finally:
1411 1415 wlock.release()
1412 1416
1413 1417 def forget(self, files, prefix=""):
1414 1418 join = lambda f: os.path.join(prefix, f)
1415 1419 wlock = self._repo.wlock()
1416 1420 try:
1417 1421 rejected = []
1418 1422 for f in files:
1419 1423 if f not in self._repo.dirstate:
1420 1424 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1421 1425 rejected.append(f)
1422 1426 elif self._repo.dirstate[f] != 'a':
1423 1427 self._repo.dirstate.remove(f)
1424 1428 else:
1425 1429 self._repo.dirstate.drop(f)
1426 1430 return rejected
1427 1431 finally:
1428 1432 wlock.release()
1429 1433
1430 1434 def undelete(self, list):
1431 1435 pctxs = self.parents()
1432 1436 wlock = self._repo.wlock()
1433 1437 try:
1434 1438 for f in list:
1435 1439 if self._repo.dirstate[f] != 'r':
1436 1440 self._repo.ui.warn(_("%s not removed!\n") % f)
1437 1441 else:
1438 1442 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1439 1443 t = fctx.data()
1440 1444 self._repo.wwrite(f, t, fctx.flags())
1441 1445 self._repo.dirstate.normal(f)
1442 1446 finally:
1443 1447 wlock.release()
1444 1448
1445 1449 def copy(self, source, dest):
1446 1450 try:
1447 1451 st = self._repo.wvfs.lstat(dest)
1448 1452 except OSError as err:
1449 1453 if err.errno != errno.ENOENT:
1450 1454 raise
1451 1455 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1452 1456 return
1453 1457 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1454 1458 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1455 1459 "symbolic link\n") % dest)
1456 1460 else:
1457 1461 wlock = self._repo.wlock()
1458 1462 try:
1459 1463 if self._repo.dirstate[dest] in '?':
1460 1464 self._repo.dirstate.add(dest)
1461 1465 elif self._repo.dirstate[dest] in 'r':
1462 1466 self._repo.dirstate.normallookup(dest)
1463 1467 self._repo.dirstate.copy(source, dest)
1464 1468 finally:
1465 1469 wlock.release()
1466 1470
1467 1471 def match(self, pats=[], include=None, exclude=None, default='glob',
1468 1472 listsubrepos=False, badfn=None):
1469 1473 r = self._repo
1470 1474
1471 1475 # Only a case insensitive filesystem needs magic to translate user input
1472 1476 # to actual case in the filesystem.
1473 1477 if not util.checkcase(r.root):
1474 1478 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1475 1479 exclude, default, r.auditor, self,
1476 1480 listsubrepos=listsubrepos,
1477 1481 badfn=badfn)
1478 1482 return matchmod.match(r.root, r.getcwd(), pats,
1479 1483 include, exclude, default,
1480 1484 auditor=r.auditor, ctx=self,
1481 1485 listsubrepos=listsubrepos, badfn=badfn)
1482 1486
1483 1487 def _filtersuspectsymlink(self, files):
1484 1488 if not files or self._repo.dirstate._checklink:
1485 1489 return files
1486 1490
1487 1491 # Symlink placeholders may get non-symlink-like contents
1488 1492 # via user error or dereferencing by NFS or Samba servers,
1489 1493 # so we filter out any placeholders that don't look like a
1490 1494 # symlink
1491 1495 sane = []
1492 1496 for f in files:
1493 1497 if self.flags(f) == 'l':
1494 1498 d = self[f].data()
1495 1499 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1496 1500 self._repo.ui.debug('ignoring suspect symlink placeholder'
1497 1501 ' "%s"\n' % f)
1498 1502 continue
1499 1503 sane.append(f)
1500 1504 return sane
1501 1505
1502 1506 def _checklookup(self, files):
1503 1507 # check for any possibly clean files
1504 1508 if not files:
1505 1509 return [], []
1506 1510
1507 1511 modified = []
1508 1512 fixup = []
1509 1513 pctx = self._parents[0]
1510 1514 # do a full compare of any files that might have changed
1511 1515 for f in sorted(files):
1512 1516 if (f not in pctx or self.flags(f) != pctx.flags(f)
1513 1517 or pctx[f].cmp(self[f])):
1514 1518 modified.append(f)
1515 1519 else:
1516 1520 fixup.append(f)
1517 1521
1518 1522 # update dirstate for files that are actually clean
1519 1523 if fixup:
1520 1524 try:
1521 1525 # updating the dirstate is optional
1522 1526 # so we don't wait on the lock
1523 1527 # wlock can invalidate the dirstate, so cache normal _after_
1524 1528 # taking the lock
1525 1529 wlock = self._repo.wlock(False)
1526 1530 normal = self._repo.dirstate.normal
1527 1531 try:
1528 1532 for f in fixup:
1529 1533 normal(f)
1530 1534 # write changes out explicitly, because nesting
1531 1535 # wlock at runtime may prevent 'wlock.release()'
1532 1536 # below from doing so for subsequent changing files
1533 1537 self._repo.dirstate.write(self._repo.currenttransaction())
1534 1538 finally:
1535 1539 wlock.release()
1536 1540 except error.LockError:
1537 1541 pass
1538 1542 return modified, fixup
1539 1543
1540 1544 def _manifestmatches(self, match, s):
1541 1545 """Slow path for workingctx
1542 1546
1543 1547 The fast path is when we compare the working directory to its parent
1544 1548 which means this function is comparing with a non-parent; therefore we
1545 1549 need to build a manifest and return what matches.
1546 1550 """
1547 1551 mf = self._repo['.']._manifestmatches(match, s)
1548 1552 for f in s.modified + s.added:
1549 1553 mf[f] = _newnode
1550 1554 mf.setflag(f, self.flags(f))
1551 1555 for f in s.removed:
1552 1556 if f in mf:
1553 1557 del mf[f]
1554 1558 return mf
1555 1559
1556 1560 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1557 1561 unknown=False):
1558 1562 '''Gets the status from the dirstate -- internal use only.'''
1559 1563 listignored, listclean, listunknown = ignored, clean, unknown
1560 1564 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1561 1565 subrepos = []
1562 1566 if '.hgsub' in self:
1563 1567 subrepos = sorted(self.substate)
1564 1568 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1565 1569 listclean, listunknown)
1566 1570
1567 1571 # check for any possibly clean files
1568 1572 if cmp:
1569 1573 modified2, fixup = self._checklookup(cmp)
1570 1574 s.modified.extend(modified2)
1571 1575
1572 1576 # update dirstate for files that are actually clean
1573 1577 if fixup and listclean:
1574 1578 s.clean.extend(fixup)
1575 1579
1576 1580 if match.always():
1577 1581 # cache for performance
1578 1582 if s.unknown or s.ignored or s.clean:
1579 1583 # "_status" is cached with list*=False in the normal route
1580 1584 self._status = scmutil.status(s.modified, s.added, s.removed,
1581 1585 s.deleted, [], [], [])
1582 1586 else:
1583 1587 self._status = s
1584 1588
1585 1589 return s
1586 1590
1587 1591 def _buildstatus(self, other, s, match, listignored, listclean,
1588 1592 listunknown):
1589 1593 """build a status with respect to another context
1590 1594
1591 1595 This includes logic for maintaining the fast path of status when
1592 1596 comparing the working directory against its parent: building a new
1593 1597 manifest is skipped unless self (the working directory) is compared
1594 1598 against something other than its parent (repo['.']).
1595 1599 """
1596 1600 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1597 1601 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1598 1602 # might have accidentally ended up with the entire contents of the file
1599 1603 # they are supposed to be linking to.
1600 1604 s.modified[:] = self._filtersuspectsymlink(s.modified)
1601 1605 if other != self._repo['.']:
1602 1606 s = super(workingctx, self)._buildstatus(other, s, match,
1603 1607 listignored, listclean,
1604 1608 listunknown)
1605 1609 return s
1606 1610
1607 1611 def _matchstatus(self, other, match):
1608 1612 """override the match method with a filter for directory patterns
1609 1613
1610 1614 We use inheritance to customize the match.bad method only for
1611 1615 workingctx, since its warning applies only to the working directory.
1612 1616
1613 1617 If we aren't comparing against the working directory's parent, a
1614 1618 bad() callback is installed below; otherwise the default match
1615 1619 object sent to us is used unchanged.
1616 1620 """
1617 1621 superself = super(workingctx, self)
1618 1622 match = superself._matchstatus(other, match)
1619 1623 if other != self._repo['.']:
1620 1624 def bad(f, msg):
1621 1625 # 'f' may be a directory pattern from 'match.files()',
1622 1626 # so 'f not in ctx1' is not enough
1623 1627 if f not in other and not other.hasdir(f):
1624 1628 self._repo.ui.warn('%s: %s\n' %
1625 1629 (self._repo.dirstate.pathto(f), msg))
1626 1630 match.bad = bad
1627 1631 return match
1628 1632
1629 1633 class committablefilectx(basefilectx):
1630 1634 """A committablefilectx provides common functionality for a file context
1631 1635 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1632 1636 def __init__(self, repo, path, filelog=None, ctx=None):
1633 1637 self._repo = repo
1634 1638 self._path = path
1635 1639 self._changeid = None
1636 1640 self._filerev = self._filenode = None
1637 1641
1638 1642 if filelog is not None:
1639 1643 self._filelog = filelog
1640 1644 if ctx:
1641 1645 self._changectx = ctx
1642 1646
1643 1647 def __nonzero__(self):
1644 1648 return True
1645 1649
1646 1650 def linkrev(self):
1647 1651 # linked to self._changectx no matter if file is modified or not
1648 1652 return self.rev()
1649 1653
1650 1654 def parents(self):
1651 1655 '''return parent filectxs, following copies if necessary'''
1652 1656 def filenode(ctx, path):
1653 1657 return ctx._manifest.get(path, nullid)
1654 1658
1655 1659 path = self._path
1656 1660 fl = self._filelog
1657 1661 pcl = self._changectx._parents
1658 1662 renamed = self.renamed()
1659 1663
1660 1664 if renamed:
1661 1665 pl = [renamed + (None,)]
1662 1666 else:
1663 1667 pl = [(path, filenode(pcl[0], path), fl)]
1664 1668
1665 1669 for pc in pcl[1:]:
1666 1670 pl.append((path, filenode(pc, path), fl))
1667 1671
1668 1672 return [self._parentfilectx(p, fileid=n, filelog=l)
1669 1673 for p, n, l in pl if n != nullid]
1670 1674
1671 1675 def children(self):
1672 1676 return []
1673 1677
1674 1678 class workingfilectx(committablefilectx):
1675 1679 """A workingfilectx object makes access to data related to a particular
1676 1680 file in the working directory convenient."""
1677 1681 def __init__(self, repo, path, filelog=None, workingctx=None):
1678 1682 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1679 1683
1680 1684 @propertycache
1681 1685 def _changectx(self):
1682 1686 return workingctx(self._repo)
1683 1687
1684 1688 def data(self):
1685 1689 return self._repo.wread(self._path)
1686 1690 def renamed(self):
1687 1691 rp = self._repo.dirstate.copied(self._path)
1688 1692 if not rp:
1689 1693 return None
1690 1694 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1691 1695
1692 1696 def size(self):
1693 1697 return self._repo.wvfs.lstat(self._path).st_size
1694 1698 def date(self):
1695 1699 t, tz = self._changectx.date()
1696 1700 try:
1697 1701 return (util.statmtimesec(self._repo.wvfs.lstat(self._path)), tz)
1698 1702 except OSError as err:
1699 1703 if err.errno != errno.ENOENT:
1700 1704 raise
1701 1705 return (t, tz)
1702 1706
1703 1707 def cmp(self, fctx):
1704 1708 """compare with other file context
1705 1709
1706 1710 returns True if different from fctx.
1707 1711 """
1708 1712 # fctx should be a filectx (not a workingfilectx)
1709 1713 # invert comparison to reuse the same code path
1710 1714 return fctx.cmp(self)
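# A minimal usage sketch, kept as a comment so the class body is unchanged
# ('repo' and the file name 'foo' are hypothetical):
#
#   wctx = repo[None]          # working directory context
#   pctx = repo['.']           # its first parent
#   if 'foo' in pctx and wctx['foo'].cmp(pctx['foo']):
#       pass  # 'foo' in the working directory differs from the parent copy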
1711 1715
1712 1716 def remove(self, ignoremissing=False):
1713 1717 """wraps unlink for a repo's working directory"""
1714 1718 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1715 1719
1716 1720 def write(self, data, flags):
1717 1721 """wraps repo.wwrite"""
1718 1722 self._repo.wwrite(self._path, data, flags)
1719 1723
1720 1724 class workingcommitctx(workingctx):
1721 1725 """A workingcommitctx object makes access to data related to
1722 1726 the revision being committed convenient.
1723 1727
1724 1728 This hides changes in the working directory if they aren't
1725 1729 committed in this context.
1726 1730 """
1727 1731 def __init__(self, repo, changes,
1728 1732 text="", user=None, date=None, extra=None):
1729 1733 super(workingctx, self).__init__(repo, text, user, date, extra,
1730 1734 changes)
1731 1735
1732 1736 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1733 1737 unknown=False):
1734 1738 """Return matched files only in ``self._status``
1735 1739
1736 1740 Uncommitted files appear "clean" via this context, even if
1737 1741 they aren't actually so in the working directory.
1738 1742 """
1739 1743 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1740 1744 if clean:
1741 1745 clean = [f for f in self._manifest if f not in self._changedset]
1742 1746 else:
1743 1747 clean = []
1744 1748 return scmutil.status([f for f in self._status.modified if match(f)],
1745 1749 [f for f in self._status.added if match(f)],
1746 1750 [f for f in self._status.removed if match(f)],
1747 1751 [], [], [], clean)
1748 1752
1749 1753 @propertycache
1750 1754 def _changedset(self):
1751 1755 """Return the set of files changed in this context
1752 1756 """
1753 1757 changed = set(self._status.modified)
1754 1758 changed.update(self._status.added)
1755 1759 changed.update(self._status.removed)
1756 1760 return changed
1757 1761
1758 1762 class memctx(committablectx):
1759 1763 """Use memctx to perform in-memory commits via localrepo.commitctx().
1760 1764
1761 1765 Revision information is supplied at initialization time, while
1762 1766 related file data is made available through a callback
1763 1767 mechanism. 'repo' is the current localrepo, 'parents' is a
1764 1768 sequence of two parent revision identifiers (pass None for every
1765 1769 missing parent), 'text' is the commit message and 'files' lists
1766 1770 the names of files touched by the revision (normalized and relative
1767 1771 to the repository root).
1768 1772
1769 1773 filectxfn(repo, memctx, path) is a callable receiving the
1770 1774 repository, the current memctx object and the normalized path of
1771 1775 the requested file, relative to the repository root. It is fired by
1772 1776 the commit function for every file in 'files', but the order of
1773 1777 calls is undefined. If the file is available in the revision being
1774 1778 committed (updated or added), filectxfn returns a memfilectx object;
1775 1779 if the file was removed, it returns None (Mercurial <= 3.1 expected
1776 1780 an IOError instead, see _returnnoneformissingfiles below). Moved
1777 1781 files are represented by marking the source file removed and the new
1778 1782 file added with copy information (see memfilectx).
1779 1783
1780 1784 user receives the committer name and defaults to current
1781 1785 repository username, date is the commit date in any format
1782 1786 supported by util.parsedate() and defaults to current date, extra
1783 1787 is a dictionary of metadata or is left empty.
1784 1788 """
1785 1789
1786 1790 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1787 1791 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1788 1792 # this field to determine what to do in filectxfn.
1789 1793 _returnnoneformissingfiles = True
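# For example, a compatibility-minded extension's filectxfn might branch on
# this attribute (a sketch, not code from this file; 'path' is the callback
# argument):
#
#   if getattr(memctx, '_returnnoneformissingfiles', False):
#       return None  # Mercurial >= 3.2: None means the file was removed
#   raise IOError(errno.ENOENT, '%s not found' % path)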
1790 1794
1791 1795 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1792 1796 date=None, extra=None, editor=False):
1793 1797 super(memctx, self).__init__(repo, text, user, date, extra)
1794 1798 self._rev = None
1795 1799 self._node = None
1796 1800 parents = [(p or nullid) for p in parents]
1797 1801 p1, p2 = parents
1798 1802 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1799 1803 files = sorted(set(files))
1800 1804 self._files = files
1801 1805 self.substate = {}
1802 1806
1803 1807 # if store is not callable, wrap it in a function
1804 1808 if not callable(filectxfn):
1805 1809 def getfilectx(repo, memctx, path):
1806 1810 fctx = filectxfn[path]
1807 1811 # this is weird but apparently we only keep track of one parent
1808 1812 # (why not only store that instead of a tuple?)
1809 1813 copied = fctx.renamed()
1810 1814 if copied:
1811 1815 copied = copied[0]
1812 1816 return memfilectx(repo, path, fctx.data(),
1813 1817 islink=fctx.islink(), isexec=fctx.isexec(),
1814 1818 copied=copied, memctx=memctx)
1815 1819 self._filectxfn = getfilectx
1816 1820 else:
1817 1821 # "util.cachefunc" reduces invocation of possibly expensive
1818 1822 # "filectxfn" for performance (e.g. converting from another VCS)
1819 1823 self._filectxfn = util.cachefunc(filectxfn)
1820 1824
1821 1825 if extra:
1822 1826 self._extra = extra.copy()
1823 1827 else:
1824 1828 self._extra = {}
1825 1829
1826 1830 if self._extra.get('branch', '') == '':
1827 1831 self._extra['branch'] = 'default'
1828 1832
1829 1833 if editor:
1830 1834 self._text = editor(self._repo, self, [])
1831 1835 self._repo.savecommitmessage(self._text)
1832 1836
1833 1837 def filectx(self, path, filelog=None):
1834 1838 """get a file context from the working directory
1835 1839
1836 1840 Returns None if the file doesn't exist and should be removed."""
1837 1841 return self._filectxfn(self._repo, self, path)
1838 1842
1839 1843 def commit(self):
1840 1844 """commit context to the repo"""
1841 1845 return self._repo.commitctx(self)
1842 1846
1843 1847 @propertycache
1844 1848 def _manifest(self):
1845 1849 """generate a manifest based on the return values of filectxfn"""
1846 1850
1847 1851 # keep this simple for now; just worry about p1
1848 1852 pctx = self._parents[0]
1849 1853 man = pctx.manifest().copy()
1850 1854
1851 1855 for f in self._status.modified:
1852 1856 p1node = nullid
1853 1857 p2node = nullid
1854 1858 p = pctx[f].parents() # if file isn't in pctx, check p2?
1855 1859 if len(p) > 0:
1856 1860 p1node = p[0].node()
1857 1861 if len(p) > 1:
1858 1862 p2node = p[1].node()
1859 1863 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1860 1864
1861 1865 for f in self._status.added:
1862 1866 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1863 1867
1864 1868 for f in self._status.removed:
1865 1869 if f in man:
1866 1870 del man[f]
1867 1871
1868 1872 return man
1869 1873
1870 1874 @propertycache
1871 1875 def _status(self):
1872 1876 """Calculate exact status from ``files`` specified at construction
1873 1877 """
1874 1878 man1 = self.p1().manifest()
1875 1879 p2 = self._parents[1]
1876 1880 # "1 < len(self._parents)" can't be used for checking
1877 1881 # existence of the 2nd parent, because "memctx._parents" is
1878 1882 # explicitly initialized by the list, of which length is 2.
1879 1883 if p2.node() != nullid:
1880 1884 man2 = p2.manifest()
1881 1885 managing = lambda f: f in man1 or f in man2
1882 1886 else:
1883 1887 managing = lambda f: f in man1
1884 1888
1885 1889 modified, added, removed = [], [], []
1886 1890 for f in self._files:
1887 1891 if not managing(f):
1888 1892 added.append(f)
1889 1893 elif self[f]:
1890 1894 modified.append(f)
1891 1895 else:
1892 1896 removed.append(f)
1893 1897
1894 1898 return scmutil.status(modified, added, removed, [], [], [], [])
1895 1899
1896 1900 class memfilectx(committablefilectx):
1897 1901 """memfilectx represents an in-memory file to commit.
1898 1902
1899 1903 See memctx and committablefilectx for more details.
1900 1904 """
1901 1905 def __init__(self, repo, path, data, islink=False,
1902 1906 isexec=False, copied=None, memctx=None):
1903 1907 """
1904 1908 path is the normalized file path relative to repository root.
1905 1909 data is the file content as a string.
1906 1910 islink is True if the file is a symbolic link.
1907 1911 isexec is True if the file is executable.
1908 1912 copied is the source file path if the current file was copied in
1909 1913 the revision being committed, or None."""
1910 1914 super(memfilectx, self).__init__(repo, path, None, memctx)
1911 1915 self._data = data
1912 1916 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1913 1917 self._copied = None
1914 1918 if copied:
1915 1919 self._copied = (copied, nullid)
1916 1920
1917 1921 def data(self):
1918 1922 return self._data
1919 1923 def size(self):
1920 1924 return len(self.data())
1921 1925 def flags(self):
1922 1926 return self._flags
1923 1927 def renamed(self):
1924 1928 return self._copied
1925 1929
1926 1930 def remove(self, ignoremissing=False):
1927 1931 """wraps unlink for a repo's working directory"""
1928 1932 # need to figure out what to do here
1929 1933 del self._changectx[self._path]
1930 1934
1931 1935 def write(self, data, flags):
1932 1936 """wraps repo.wwrite"""
1933 1937 self._data = data
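# A rename could be expressed inside a filectxfn roughly as follows (a
# sketch; 'old.txt' and 'new.txt' are hypothetical names, and both must be
# listed in the memctx 'files' argument):
#
#   def getfilectx(repo, memctx, path):
#       if path == 'old.txt':
#           return None  # the source of the move is reported as removed
#       return memfilectx(repo, path, 'contents\n',
#                         copied='old.txt', memctx=memctx)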