workingctx: use node.wdirid constant
Yuya Nishihara
r25738:04d26a3c default
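
The diff below is a one-line cleanup in mercurial/context.py (plus the matching import): workingctx.hex() stops spelling the working-directory hash as the magic string "ff" * 20 and returns hex(wdirid) instead, with wdirid now imported from the node module. A minimal sketch of why the two spellings are equivalent, assuming wdirid is the 20-byte all-0xff placeholder node and node.hex is a thin alias for binascii.hexlify, as in node.py of this era (names redefined locally for illustration, written with bytes literals so the snippet also runs on Python 3; the Mercurial code itself is Python 2):

    from binascii import hexlify

    wdirid = b"\xff" * 20   # placeholder node id for the working directory

    def hex(node):
        # node.hex() behaves like binascii.hexlify for this purpose
        return hexlify(node)

    # the old literal and the new constant-based spelling give the same string
    assert hex(wdirid) == b"ff" * 20
    assert len(hex(wdirid)) == 40

Keeping the value in a single named constant means callers such as workingctx.hex() cannot drift out of sync with the node module's definition of the working-directory id.
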
@@ -1,1919 +1,1919 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 from node import nullid, nullrev, short, hex, bin
8 from node import nullid, nullrev, wdirid, short, hex, bin
9 9 from i18n import _
10 10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 11 import match as matchmod
12 12 import os, errno, stat
13 13 import obsolete as obsmod
14 14 import repoview
15 15 import fileset
16 16 import revlog
17 17
18 18 propertycache = util.propertycache
19 19
20 20 # Phony node value to stand in for new files in some uses of
21 21 # manifests. Manifests support 21-byte hashes for nodes which are
22 22 # dirty in the working copy.
23 23 _newnode = '!' * 21
24 24
25 25 class basectx(object):
26 26 """A basectx object represents the common logic for its children:
27 27 changectx: read-only context that is already present in the repo,
28 28 workingctx: a context that represents the working directory and can
29 29 be committed,
30 30 memctx: a context that represents changes in-memory and can also
31 31 be committed."""
32 32 def __new__(cls, repo, changeid='', *args, **kwargs):
33 33 if isinstance(changeid, basectx):
34 34 return changeid
35 35
36 36 o = super(basectx, cls).__new__(cls)
37 37
38 38 o._repo = repo
39 39 o._rev = nullrev
40 40 o._node = nullid
41 41
42 42 return o
43 43
44 44 def __str__(self):
45 45 return short(self.node())
46 46
47 47 def __int__(self):
48 48 return self.rev()
49 49
50 50 def __repr__(self):
51 51 return "<%s %s>" % (type(self).__name__, str(self))
52 52
53 53 def __eq__(self, other):
54 54 try:
55 55 return type(self) == type(other) and self._rev == other._rev
56 56 except AttributeError:
57 57 return False
58 58
59 59 def __ne__(self, other):
60 60 return not (self == other)
61 61
62 62 def __contains__(self, key):
63 63 return key in self._manifest
64 64
65 65 def __getitem__(self, key):
66 66 return self.filectx(key)
67 67
68 68 def __iter__(self):
69 69 return iter(self._manifest)
70 70
71 71 def _manifestmatches(self, match, s):
72 72 """generate a new manifest filtered by the match argument
73 73
74 74 This method is for internal use only and mainly exists to provide an
75 75 object oriented way for other contexts to customize the manifest
76 76 generation.
77 77 """
78 78 return self.manifest().matches(match)
79 79
80 80 def _matchstatus(self, other, match):
81 81 """return match.always if match is None
82 82
83 83 This internal method provides a way for child objects to override the
84 84 match operator.
85 85 """
86 86 return match or matchmod.always(self._repo.root, self._repo.getcwd())
87 87
88 88 def _buildstatus(self, other, s, match, listignored, listclean,
89 89 listunknown):
90 90 """build a status with respect to another context"""
91 91 # Load earliest manifest first for caching reasons. More specifically,
92 92 # if you have revisions 1000 and 1001, 1001 is probably stored as a
93 93 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
94 94 # 1000 and cache it so that when you read 1001, we just need to apply a
95 95 # delta to what's in the cache. So that's one full reconstruction + one
96 96 # delta application.
97 97 if self.rev() is not None and self.rev() < other.rev():
98 98 self.manifest()
99 99 mf1 = other._manifestmatches(match, s)
100 100 mf2 = self._manifestmatches(match, s)
101 101
102 102 modified, added = [], []
103 103 removed = []
104 104 clean = []
105 105 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
106 106 deletedset = set(deleted)
107 107 d = mf1.diff(mf2, clean=listclean)
108 108 for fn, value in d.iteritems():
109 109 if fn in deletedset:
110 110 continue
111 111 if value is None:
112 112 clean.append(fn)
113 113 continue
114 114 (node1, flag1), (node2, flag2) = value
115 115 if node1 is None:
116 116 added.append(fn)
117 117 elif node2 is None:
118 118 removed.append(fn)
119 119 elif node2 != _newnode:
120 120 # The file was not a new file in mf2, so an entry
121 121 # from diff is really a difference.
122 122 modified.append(fn)
123 123 elif self[fn].cmp(other[fn]):
124 124 # node2 was newnode, but the working file doesn't
125 125 # match the one in mf1.
126 126 modified.append(fn)
127 127 else:
128 128 clean.append(fn)
129 129
130 130 if removed:
131 131 # need to filter files if they are already reported as removed
132 132 unknown = [fn for fn in unknown if fn not in mf1]
133 133 ignored = [fn for fn in ignored if fn not in mf1]
134 134 # if they're deleted, don't report them as removed
135 135 removed = [fn for fn in removed if fn not in deletedset]
136 136
137 137 return scmutil.status(modified, added, removed, deleted, unknown,
138 138 ignored, clean)
139 139
140 140 @propertycache
141 141 def substate(self):
142 142 return subrepo.state(self, self._repo.ui)
143 143
144 144 def subrev(self, subpath):
145 145 return self.substate[subpath][1]
146 146
147 147 def rev(self):
148 148 return self._rev
149 149 def node(self):
150 150 return self._node
151 151 def hex(self):
152 152 return hex(self.node())
153 153 def manifest(self):
154 154 return self._manifest
155 155 def repo(self):
156 156 return self._repo
157 157 def phasestr(self):
158 158 return phases.phasenames[self.phase()]
159 159 def mutable(self):
160 160 return self.phase() > phases.public
161 161
162 162 def getfileset(self, expr):
163 163 return fileset.getfileset(self, expr)
164 164
165 165 def obsolete(self):
166 166 """True if the changeset is obsolete"""
167 167 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
168 168
169 169 def extinct(self):
170 170 """True if the changeset is extinct"""
171 171 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
172 172
173 173 def unstable(self):
174 174 """True if the changeset is not obsolete but its ancestors are"""
175 175 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
176 176
177 177 def bumped(self):
178 178 """True if the changeset tries to be a successor of a public changeset
179 179
180 180 Only non-public and non-obsolete changesets may be bumped.
181 181 """
182 182 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
183 183
184 184 def divergent(self):
185 185 """Is a successor of a changeset with multiple possible successors sets
186 186
187 187 Only non-public and non-obsolete changesets may be divergent.
188 188 """
189 189 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
190 190
191 191 def troubled(self):
192 192 """True if the changeset is either unstable, bumped or divergent"""
193 193 return self.unstable() or self.bumped() or self.divergent()
194 194
195 195 def troubles(self):
196 196 """return the list of troubles affecting this changeset.
197 197
198 198 Troubles are returned as strings. Possible values are:
199 199 - unstable,
200 200 - bumped,
201 201 - divergent.
202 202 """
203 203 troubles = []
204 204 if self.unstable():
205 205 troubles.append('unstable')
206 206 if self.bumped():
207 207 troubles.append('bumped')
208 208 if self.divergent():
209 209 troubles.append('divergent')
210 210 return troubles
211 211
212 212 def parents(self):
213 213 """return contexts for each parent changeset"""
214 214 return self._parents
215 215
216 216 def p1(self):
217 217 return self._parents[0]
218 218
219 219 def p2(self):
220 220 if len(self._parents) == 2:
221 221 return self._parents[1]
222 222 return changectx(self._repo, -1)
223 223
224 224 def _fileinfo(self, path):
225 225 if '_manifest' in self.__dict__:
226 226 try:
227 227 return self._manifest[path], self._manifest.flags(path)
228 228 except KeyError:
229 229 raise error.ManifestLookupError(self._node, path,
230 230 _('not found in manifest'))
231 231 if '_manifestdelta' in self.__dict__ or path in self.files():
232 232 if path in self._manifestdelta:
233 233 return (self._manifestdelta[path],
234 234 self._manifestdelta.flags(path))
235 235 node, flag = self._repo.manifest.find(self._changeset[0], path)
236 236 if not node:
237 237 raise error.ManifestLookupError(self._node, path,
238 238 _('not found in manifest'))
239 239
240 240 return node, flag
241 241
242 242 def filenode(self, path):
243 243 return self._fileinfo(path)[0]
244 244
245 245 def flags(self, path):
246 246 try:
247 247 return self._fileinfo(path)[1]
248 248 except error.LookupError:
249 249 return ''
250 250
251 251 def sub(self, path):
252 252 '''return a subrepo for the stored revision of path, never wdir()'''
253 253 return subrepo.subrepo(self, path)
254 254
255 255 def nullsub(self, path, pctx):
256 256 return subrepo.nullsubrepo(self, path, pctx)
257 257
258 258 def workingsub(self, path):
259 259 '''return a subrepo for the stored revision, or wdir if this is a wdir
260 260 context.
261 261 '''
262 262 return subrepo.subrepo(self, path, allowwdir=True)
263 263
264 264 def match(self, pats=[], include=None, exclude=None, default='glob',
265 265 listsubrepos=False, badfn=None):
266 266 r = self._repo
267 267 return matchmod.match(r.root, r.getcwd(), pats,
268 268 include, exclude, default,
269 269 auditor=r.auditor, ctx=self,
270 270 listsubrepos=listsubrepos, badfn=badfn)
271 271
272 272 def diff(self, ctx2=None, match=None, **opts):
273 273 """Returns a diff generator for the given contexts and matcher"""
274 274 if ctx2 is None:
275 275 ctx2 = self.p1()
276 276 if ctx2 is not None:
277 277 ctx2 = self._repo[ctx2]
278 278 diffopts = patch.diffopts(self._repo.ui, opts)
279 279 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
280 280
281 281 def dirs(self):
282 282 return self._manifest.dirs()
283 283
284 284 def hasdir(self, dir):
285 285 return self._manifest.hasdir(dir)
286 286
287 287 def dirty(self, missing=False, merge=True, branch=True):
288 288 return False
289 289
290 290 def status(self, other=None, match=None, listignored=False,
291 291 listclean=False, listunknown=False, listsubrepos=False):
292 292 """return status of files between two nodes or node and working
293 293 directory.
294 294
295 295 If other is None, compare this node with working directory.
296 296
297 297 returns (modified, added, removed, deleted, unknown, ignored, clean)
298 298 """
299 299
300 300 ctx1 = self
301 301 ctx2 = self._repo[other]
302 302
303 303 # This next code block is, admittedly, fragile logic that tests for
304 304 # reversing the contexts and wouldn't need to exist if it weren't for
305 305 # the fast (and common) code path of comparing the working directory
306 306 # with its first parent.
307 307 #
308 308 # What we're aiming for here is the ability to call:
309 309 #
310 310 # workingctx.status(parentctx)
311 311 #
312 312 # If we always built the manifest for each context and compared those,
313 313 # then we'd be done. But the special case of the above call means we
314 314 # just copy the manifest of the parent.
315 315 reversed = False
316 316 if (not isinstance(ctx1, changectx)
317 317 and isinstance(ctx2, changectx)):
318 318 reversed = True
319 319 ctx1, ctx2 = ctx2, ctx1
320 320
321 321 match = ctx2._matchstatus(ctx1, match)
322 322 r = scmutil.status([], [], [], [], [], [], [])
323 323 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
324 324 listunknown)
325 325
326 326 if reversed:
327 327 # Reverse added and removed. Clear deleted, unknown and ignored as
328 328 # these make no sense to reverse.
329 329 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
330 330 r.clean)
331 331
332 332 if listsubrepos:
333 333 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
334 334 rev2 = ctx2.subrev(subpath)
335 335 try:
336 336 submatch = matchmod.narrowmatcher(subpath, match)
337 337 s = sub.status(rev2, match=submatch, ignored=listignored,
338 338 clean=listclean, unknown=listunknown,
339 339 listsubrepos=True)
340 340 for rfiles, sfiles in zip(r, s):
341 341 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
342 342 except error.LookupError:
343 343 self._repo.ui.status(_("skipping missing "
344 344 "subrepository: %s\n") % subpath)
345 345
346 346 for l in r:
347 347 l.sort()
348 348
349 349 return r
350 350
351 351
352 352 def makememctx(repo, parents, text, user, date, branch, files, store,
353 353 editor=None, extra=None):
354 354 def getfilectx(repo, memctx, path):
355 355 data, mode, copied = store.getfile(path)
356 356 if data is None:
357 357 return None
358 358 islink, isexec = mode
359 359 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
360 360 copied=copied, memctx=memctx)
361 361 if extra is None:
362 362 extra = {}
363 363 if branch:
364 364 extra['branch'] = encoding.fromlocal(branch)
365 365 ctx = memctx(repo, parents, text, files, getfilectx, user,
366 366 date, extra, editor)
367 367 return ctx
368 368
369 369 class changectx(basectx):
370 370 """A changecontext object makes access to data related to a particular
371 371 changeset convenient. It represents a read-only context already present in
372 372 the repo."""
373 373 def __init__(self, repo, changeid=''):
374 374 """changeid is a revision number, node, or tag"""
375 375
376 376 # since basectx.__new__ already took care of copying the object, we
377 377 # don't need to do anything in __init__, so we just exit here
378 378 if isinstance(changeid, basectx):
379 379 return
380 380
381 381 if changeid == '':
382 382 changeid = '.'
383 383 self._repo = repo
384 384
385 385 try:
386 386 if isinstance(changeid, int):
387 387 self._node = repo.changelog.node(changeid)
388 388 self._rev = changeid
389 389 return
390 390 if isinstance(changeid, long):
391 391 changeid = str(changeid)
392 392 if changeid == 'null':
393 393 self._node = nullid
394 394 self._rev = nullrev
395 395 return
396 396 if changeid == 'tip':
397 397 self._node = repo.changelog.tip()
398 398 self._rev = repo.changelog.rev(self._node)
399 399 return
400 400 if changeid == '.' or changeid == repo.dirstate.p1():
401 401 # this is a hack to delay/avoid loading obsmarkers
402 402 # when we know that '.' won't be hidden
403 403 self._node = repo.dirstate.p1()
404 404 self._rev = repo.unfiltered().changelog.rev(self._node)
405 405 return
406 406 if len(changeid) == 20:
407 407 try:
408 408 self._node = changeid
409 409 self._rev = repo.changelog.rev(changeid)
410 410 return
411 411 except error.FilteredRepoLookupError:
412 412 raise
413 413 except LookupError:
414 414 pass
415 415
416 416 try:
417 417 r = int(changeid)
418 418 if str(r) != changeid:
419 419 raise ValueError
420 420 l = len(repo.changelog)
421 421 if r < 0:
422 422 r += l
423 423 if r < 0 or r >= l:
424 424 raise ValueError
425 425 self._rev = r
426 426 self._node = repo.changelog.node(r)
427 427 return
428 428 except error.FilteredIndexError:
429 429 raise
430 430 except (ValueError, OverflowError, IndexError):
431 431 pass
432 432
433 433 if len(changeid) == 40:
434 434 try:
435 435 self._node = bin(changeid)
436 436 self._rev = repo.changelog.rev(self._node)
437 437 return
438 438 except error.FilteredLookupError:
439 439 raise
440 440 except (TypeError, LookupError):
441 441 pass
442 442
443 443 # lookup bookmarks through the name interface
444 444 try:
445 445 self._node = repo.names.singlenode(repo, changeid)
446 446 self._rev = repo.changelog.rev(self._node)
447 447 return
448 448 except KeyError:
449 449 pass
450 450 except error.FilteredRepoLookupError:
451 451 raise
452 452 except error.RepoLookupError:
453 453 pass
454 454
455 455 self._node = repo.unfiltered().changelog._partialmatch(changeid)
456 456 if self._node is not None:
457 457 self._rev = repo.changelog.rev(self._node)
458 458 return
459 459
460 460 # lookup failed
461 461 # check if it might have come from damaged dirstate
462 462 #
463 463 # XXX we could avoid the unfiltered if we had a recognizable
464 464 # exception for filtered changeset access
465 465 if changeid in repo.unfiltered().dirstate.parents():
466 466 msg = _("working directory has unknown parent '%s'!")
467 467 raise error.Abort(msg % short(changeid))
468 468 try:
469 469 if len(changeid) == 20:
470 470 changeid = hex(changeid)
471 471 except TypeError:
472 472 pass
473 473 except (error.FilteredIndexError, error.FilteredLookupError,
474 474 error.FilteredRepoLookupError):
475 475 if repo.filtername.startswith('visible'):
476 476 msg = _("hidden revision '%s'") % changeid
477 477 hint = _('use --hidden to access hidden revisions')
478 478 raise error.FilteredRepoLookupError(msg, hint=hint)
479 479 msg = _("filtered revision '%s' (not in '%s' subset)")
480 480 msg %= (changeid, repo.filtername)
481 481 raise error.FilteredRepoLookupError(msg)
482 482 except IndexError:
483 483 pass
484 484 raise error.RepoLookupError(
485 485 _("unknown revision '%s'") % changeid)
486 486
487 487 def __hash__(self):
488 488 try:
489 489 return hash(self._rev)
490 490 except AttributeError:
491 491 return id(self)
492 492
493 493 def __nonzero__(self):
494 494 return self._rev != nullrev
495 495
496 496 @propertycache
497 497 def _changeset(self):
498 498 return self._repo.changelog.read(self.rev())
499 499
500 500 @propertycache
501 501 def _manifest(self):
502 502 return self._repo.manifest.read(self._changeset[0])
503 503
504 504 @propertycache
505 505 def _manifestdelta(self):
506 506 return self._repo.manifest.readdelta(self._changeset[0])
507 507
508 508 @propertycache
509 509 def _parents(self):
510 510 p = self._repo.changelog.parentrevs(self._rev)
511 511 if p[1] == nullrev:
512 512 p = p[:-1]
513 513 return [changectx(self._repo, x) for x in p]
514 514
515 515 def changeset(self):
516 516 return self._changeset
517 517 def manifestnode(self):
518 518 return self._changeset[0]
519 519
520 520 def user(self):
521 521 return self._changeset[1]
522 522 def date(self):
523 523 return self._changeset[2]
524 524 def files(self):
525 525 return self._changeset[3]
526 526 def description(self):
527 527 return self._changeset[4]
528 528 def branch(self):
529 529 return encoding.tolocal(self._changeset[5].get("branch"))
530 530 def closesbranch(self):
531 531 return 'close' in self._changeset[5]
532 532 def extra(self):
533 533 return self._changeset[5]
534 534 def tags(self):
535 535 return self._repo.nodetags(self._node)
536 536 def bookmarks(self):
537 537 return self._repo.nodebookmarks(self._node)
538 538 def phase(self):
539 539 return self._repo._phasecache.phase(self._repo, self._rev)
540 540 def hidden(self):
541 541 return self._rev in repoview.filterrevs(self._repo, 'visible')
542 542
543 543 def children(self):
544 544 """return contexts for each child changeset"""
545 545 c = self._repo.changelog.children(self._node)
546 546 return [changectx(self._repo, x) for x in c]
547 547
548 548 def ancestors(self):
549 549 for a in self._repo.changelog.ancestors([self._rev]):
550 550 yield changectx(self._repo, a)
551 551
552 552 def descendants(self):
553 553 for d in self._repo.changelog.descendants([self._rev]):
554 554 yield changectx(self._repo, d)
555 555
556 556 def filectx(self, path, fileid=None, filelog=None):
557 557 """get a file context from this changeset"""
558 558 if fileid is None:
559 559 fileid = self.filenode(path)
560 560 return filectx(self._repo, path, fileid=fileid,
561 561 changectx=self, filelog=filelog)
562 562
563 563 def ancestor(self, c2, warn=False):
564 564 """return the "best" ancestor context of self and c2
565 565
566 566 If there are multiple candidates, it will show a message and check
567 567 merge.preferancestor configuration before falling back to the
568 568 revlog ancestor."""
569 569 # deal with workingctxs
570 570 n2 = c2._node
571 571 if n2 is None:
572 572 n2 = c2._parents[0]._node
573 573 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
574 574 if not cahs:
575 575 anc = nullid
576 576 elif len(cahs) == 1:
577 577 anc = cahs[0]
578 578 else:
579 579 for r in self._repo.ui.configlist('merge', 'preferancestor'):
580 580 try:
581 581 ctx = changectx(self._repo, r)
582 582 except error.RepoLookupError:
583 583 continue
584 584 anc = ctx.node()
585 585 if anc in cahs:
586 586 break
587 587 else:
588 588 anc = self._repo.changelog.ancestor(self._node, n2)
589 589 if warn:
590 590 self._repo.ui.status(
591 591 (_("note: using %s as ancestor of %s and %s\n") %
592 592 (short(anc), short(self._node), short(n2))) +
593 593 ''.join(_(" alternatively, use --config "
594 594 "merge.preferancestor=%s\n") %
595 595 short(n) for n in sorted(cahs) if n != anc))
596 596 return changectx(self._repo, anc)
597 597
598 598 def descendant(self, other):
599 599 """True if other is descendant of this changeset"""
600 600 return self._repo.changelog.descendant(self._rev, other._rev)
601 601
602 602 def walk(self, match):
603 603 '''Generates matching file names.'''
604 604
605 605 # Wrap match.bad method to have message with nodeid
606 606 def bad(fn, msg):
607 607 # The manifest doesn't know about subrepos, so don't complain about
608 608 # paths into valid subrepos.
609 609 if any(fn == s or fn.startswith(s + '/')
610 610 for s in self.substate):
611 611 return
612 612 match.bad(fn, _('no such file in rev %s') % self)
613 613
614 614 m = matchmod.badmatch(match, bad)
615 615 return self._manifest.walk(m)
616 616
617 617 def matches(self, match):
618 618 return self.walk(match)
619 619
620 620 class basefilectx(object):
621 621 """A filecontext object represents the common logic for its children:
622 622 filectx: read-only access to a filerevision that is already present
623 623 in the repo,
624 624 workingfilectx: a filecontext that represents files from the working
625 625 directory,
626 626 memfilectx: a filecontext that represents files in-memory."""
627 627 def __new__(cls, repo, path, *args, **kwargs):
628 628 return super(basefilectx, cls).__new__(cls)
629 629
630 630 @propertycache
631 631 def _filelog(self):
632 632 return self._repo.file(self._path)
633 633
634 634 @propertycache
635 635 def _changeid(self):
636 636 if '_changeid' in self.__dict__:
637 637 return self._changeid
638 638 elif '_changectx' in self.__dict__:
639 639 return self._changectx.rev()
640 640 elif '_descendantrev' in self.__dict__:
641 641 # this file context was created from a revision with a known
642 642 # descendant, we can (lazily) correct for linkrev aliases
643 643 return self._adjustlinkrev(self._path, self._filelog,
644 644 self._filenode, self._descendantrev)
645 645 else:
646 646 return self._filelog.linkrev(self._filerev)
647 647
648 648 @propertycache
649 649 def _filenode(self):
650 650 if '_fileid' in self.__dict__:
651 651 return self._filelog.lookup(self._fileid)
652 652 else:
653 653 return self._changectx.filenode(self._path)
654 654
655 655 @propertycache
656 656 def _filerev(self):
657 657 return self._filelog.rev(self._filenode)
658 658
659 659 @propertycache
660 660 def _repopath(self):
661 661 return self._path
662 662
663 663 def __nonzero__(self):
664 664 try:
665 665 self._filenode
666 666 return True
667 667 except error.LookupError:
668 668 # file is missing
669 669 return False
670 670
671 671 def __str__(self):
672 672 return "%s@%s" % (self.path(), self._changectx)
673 673
674 674 def __repr__(self):
675 675 return "<%s %s>" % (type(self).__name__, str(self))
676 676
677 677 def __hash__(self):
678 678 try:
679 679 return hash((self._path, self._filenode))
680 680 except AttributeError:
681 681 return id(self)
682 682
683 683 def __eq__(self, other):
684 684 try:
685 685 return (type(self) == type(other) and self._path == other._path
686 686 and self._filenode == other._filenode)
687 687 except AttributeError:
688 688 return False
689 689
690 690 def __ne__(self, other):
691 691 return not (self == other)
692 692
693 693 def filerev(self):
694 694 return self._filerev
695 695 def filenode(self):
696 696 return self._filenode
697 697 def flags(self):
698 698 return self._changectx.flags(self._path)
699 699 def filelog(self):
700 700 return self._filelog
701 701 def rev(self):
702 702 return self._changeid
703 703 def linkrev(self):
704 704 return self._filelog.linkrev(self._filerev)
705 705 def node(self):
706 706 return self._changectx.node()
707 707 def hex(self):
708 708 return self._changectx.hex()
709 709 def user(self):
710 710 return self._changectx.user()
711 711 def date(self):
712 712 return self._changectx.date()
713 713 def files(self):
714 714 return self._changectx.files()
715 715 def description(self):
716 716 return self._changectx.description()
717 717 def branch(self):
718 718 return self._changectx.branch()
719 719 def extra(self):
720 720 return self._changectx.extra()
721 721 def phase(self):
722 722 return self._changectx.phase()
723 723 def phasestr(self):
724 724 return self._changectx.phasestr()
725 725 def manifest(self):
726 726 return self._changectx.manifest()
727 727 def changectx(self):
728 728 return self._changectx
729 729 def repo(self):
730 730 return self._repo
731 731
732 732 def path(self):
733 733 return self._path
734 734
735 735 def isbinary(self):
736 736 try:
737 737 return util.binary(self.data())
738 738 except IOError:
739 739 return False
740 740 def isexec(self):
741 741 return 'x' in self.flags()
742 742 def islink(self):
743 743 return 'l' in self.flags()
744 744
745 745 def cmp(self, fctx):
746 746 """compare with other file context
747 747
748 748 returns True if different than fctx.
749 749 """
750 750 if (fctx._filerev is None
751 751 and (self._repo._encodefilterpats
752 752 # if file data starts with '\1\n', empty metadata block is
753 753 # prepended, which adds 4 bytes to filelog.size().
754 754 or self.size() - 4 == fctx.size())
755 755 or self.size() == fctx.size()):
756 756 return self._filelog.cmp(self._filenode, fctx.data())
757 757
758 758 return True
759 759
760 760 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
761 761 """return the first ancestor of <srcrev> introducing <fnode>
762 762
763 763 If the linkrev of the file revision does not point to an ancestor of
764 764 srcrev, we'll walk down the ancestors until we find one introducing
765 765 this file revision.
766 766
767 767 :repo: a localrepository object (used to access changelog and manifest)
768 768 :path: the file path
769 769 :fnode: the nodeid of the file revision
770 770 :filelog: the filelog of this path
771 771 :srcrev: the changeset revision we search ancestors from
772 772 :inclusive: if true, the src revision will also be checked
773 773 """
774 774 repo = self._repo
775 775 cl = repo.unfiltered().changelog
776 776 ma = repo.manifest
777 777 # fetch the linkrev
778 778 fr = filelog.rev(fnode)
779 779 lkr = filelog.linkrev(fr)
780 780 # hack to reuse ancestor computation when searching for renames
781 781 memberanc = getattr(self, '_ancestrycontext', None)
782 782 iteranc = None
783 783 if srcrev is None:
784 784 # wctx case, used by workingfilectx during mergecopy
785 785 revs = [p.rev() for p in self._repo[None].parents()]
786 786 inclusive = True # we skipped the real (revless) source
787 787 else:
788 788 revs = [srcrev]
789 789 if memberanc is None:
790 790 memberanc = iteranc = cl.ancestors(revs, lkr,
791 791 inclusive=inclusive)
792 792 # check if this linkrev is an ancestor of srcrev
793 793 if lkr not in memberanc:
794 794 if iteranc is None:
795 795 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
796 796 for a in iteranc:
797 797 ac = cl.read(a) # get changeset data (we avoid object creation)
798 798 if path in ac[3]: # checking the 'files' field.
799 799 # The file has been touched, check if the content is
800 800 # similar to the one we search for.
801 801 if fnode == ma.readfast(ac[0]).get(path):
802 802 return a
803 803 # In theory, we should never get out of that loop without a result.
804 804 # But if the manifest uses a buggy file revision (not a child of the
805 805 # one it replaces) we could. Such a buggy situation will likely
806 806 # result in a crash somewhere else at some point.
807 807 return lkr
808 808
809 809 def introrev(self):
810 810 """return the rev of the changeset which introduced this file revision
811 811
812 812 This method is different from linkrev because it takes into account the
813 813 changeset the filectx was created from. It ensures the returned
814 814 revision is one of its ancestors. This prevents bugs from
815 815 'linkrev-shadowing' when a file revision is used by multiple
816 816 changesets.
817 817 """
818 818 lkr = self.linkrev()
819 819 attrs = vars(self)
820 820 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
821 821 if noctx or self.rev() == lkr:
822 822 return self.linkrev()
823 823 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
824 824 self.rev(), inclusive=True)
825 825
826 826 def _parentfilectx(self, path, fileid, filelog):
827 827 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
828 828 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
829 829 if '_changeid' in vars(self) or '_changectx' in vars(self):
830 830 # If self is associated with a changeset (probably explicitly
831 831 # fed), ensure the created filectx is associated with a
832 832 # changeset that is an ancestor of self.changectx.
833 833 # This lets us later use _adjustlinkrev to get a correct link.
834 834 fctx._descendantrev = self.rev()
835 835 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
836 836 elif '_descendantrev' in vars(self):
837 837 # Otherwise propagate _descendantrev if we have one associated.
838 838 fctx._descendantrev = self._descendantrev
839 839 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
840 840 return fctx
841 841
842 842 def parents(self):
843 843 _path = self._path
844 844 fl = self._filelog
845 845 parents = self._filelog.parents(self._filenode)
846 846 pl = [(_path, node, fl) for node in parents if node != nullid]
847 847
848 848 r = fl.renamed(self._filenode)
849 849 if r:
850 850 # - In the simple rename case, both parents are nullid, so pl is empty.
851 851 # - In case of merge, only one of the parents is nullid and should
852 852 # be replaced with the rename information. This parent is -always-
853 853 # the first one.
854 854 #
855 855 # As nullid parents have always been filtered out in the previous list
856 856 # comprehension, inserting at 0 will always result in replacing the
857 857 # first nullid parent with the rename information.
858 858 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
859 859
860 860 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
861 861
862 862 def p1(self):
863 863 return self.parents()[0]
864 864
865 865 def p2(self):
866 866 p = self.parents()
867 867 if len(p) == 2:
868 868 return p[1]
869 869 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
870 870
871 871 def annotate(self, follow=False, linenumber=None, diffopts=None):
872 872 '''returns a list of tuples of (ctx, line) for each line
873 873 in the file, where ctx is the filectx of the node where
874 874 that line was last changed.
875 875 This returns tuples of ((ctx, linenumber), line) for each line,
876 876 if the "linenumber" parameter is NOT "None".
877 877 In such tuples, linenumber is the line number at the line's first
878 878 appearance in the managed file.
879 879 To reduce annotation cost,
880 880 this returns a fixed value (False) as linenumber,
881 881 if the "linenumber" parameter is "False".'''
882 882
883 883 if linenumber is None:
884 884 def decorate(text, rev):
885 885 return ([rev] * len(text.splitlines()), text)
886 886 elif linenumber:
887 887 def decorate(text, rev):
888 888 size = len(text.splitlines())
889 889 return ([(rev, i) for i in xrange(1, size + 1)], text)
890 890 else:
891 891 def decorate(text, rev):
892 892 return ([(rev, False)] * len(text.splitlines()), text)
893 893
894 894 def pair(parent, child):
895 895 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
896 896 refine=True)
897 897 for (a1, a2, b1, b2), t in blocks:
898 898 # Changed blocks ('!') or blocks made only of blank lines ('~')
899 899 # belong to the child.
900 900 if t == '=':
901 901 child[0][b1:b2] = parent[0][a1:a2]
902 902 return child
903 903
904 904 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
905 905
906 906 def parents(f):
907 907 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
908 908 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
909 909 # from the topmost introrev (= srcrev) down to p.linkrev() if it
910 910 # isn't an ancestor of the srcrev.
911 911 f._changeid
912 912 pl = f.parents()
913 913
914 914 # Don't return renamed parents if we aren't following.
915 915 if not follow:
916 916 pl = [p for p in pl if p.path() == f.path()]
917 917
918 918 # renamed filectx won't have a filelog yet, so set it
919 919 # from the cache to save time
920 920 for p in pl:
921 921 if not '_filelog' in p.__dict__:
922 922 p._filelog = getlog(p.path())
923 923
924 924 return pl
925 925
926 926 # use linkrev to find the first changeset where self appeared
927 927 base = self
928 928 introrev = self.introrev()
929 929 if self.rev() != introrev:
930 930 base = self.filectx(self.filenode(), changeid=introrev)
931 931 if getattr(base, '_ancestrycontext', None) is None:
932 932 cl = self._repo.changelog
933 933 if introrev is None:
934 934 # wctx is not inclusive, but works because _ancestrycontext
935 935 # is used to test filelog revisions
936 936 ac = cl.ancestors([p.rev() for p in base.parents()],
937 937 inclusive=True)
938 938 else:
939 939 ac = cl.ancestors([introrev], inclusive=True)
940 940 base._ancestrycontext = ac
941 941
942 942 # This algorithm would prefer to be recursive, but Python is a
943 943 # bit recursion-hostile. Instead we do an iterative
944 944 # depth-first search.
945 945
946 946 visit = [base]
947 947 hist = {}
948 948 pcache = {}
949 949 needed = {base: 1}
950 950 while visit:
951 951 f = visit[-1]
952 952 pcached = f in pcache
953 953 if not pcached:
954 954 pcache[f] = parents(f)
955 955
956 956 ready = True
957 957 pl = pcache[f]
958 958 for p in pl:
959 959 if p not in hist:
960 960 ready = False
961 961 visit.append(p)
962 962 if not pcached:
963 963 needed[p] = needed.get(p, 0) + 1
964 964 if ready:
965 965 visit.pop()
966 966 reusable = f in hist
967 967 if reusable:
968 968 curr = hist[f]
969 969 else:
970 970 curr = decorate(f.data(), f)
971 971 for p in pl:
972 972 if not reusable:
973 973 curr = pair(hist[p], curr)
974 974 if needed[p] == 1:
975 975 del hist[p]
976 976 del needed[p]
977 977 else:
978 978 needed[p] -= 1
979 979
980 980 hist[f] = curr
981 981 pcache[f] = []
982 982
983 983 return zip(hist[base][0], hist[base][1].splitlines(True))
984 984
985 985 def ancestors(self, followfirst=False):
986 986 visit = {}
987 987 c = self
988 988 if followfirst:
989 989 cut = 1
990 990 else:
991 991 cut = None
992 992
993 993 while True:
994 994 for parent in c.parents()[:cut]:
995 995 visit[(parent.linkrev(), parent.filenode())] = parent
996 996 if not visit:
997 997 break
998 998 c = visit.pop(max(visit))
999 999 yield c
1000 1000
1001 1001 class filectx(basefilectx):
1002 1002 """A filecontext object makes access to data related to a particular
1003 1003 filerevision convenient."""
1004 1004 def __init__(self, repo, path, changeid=None, fileid=None,
1005 1005 filelog=None, changectx=None):
1006 1006 """changeid can be a changeset revision, node, or tag.
1007 1007 fileid can be a file revision or node."""
1008 1008 self._repo = repo
1009 1009 self._path = path
1010 1010
1011 1011 assert (changeid is not None
1012 1012 or fileid is not None
1013 1013 or changectx is not None), \
1014 1014 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1015 1015 % (changeid, fileid, changectx))
1016 1016
1017 1017 if filelog is not None:
1018 1018 self._filelog = filelog
1019 1019
1020 1020 if changeid is not None:
1021 1021 self._changeid = changeid
1022 1022 if changectx is not None:
1023 1023 self._changectx = changectx
1024 1024 if fileid is not None:
1025 1025 self._fileid = fileid
1026 1026
1027 1027 @propertycache
1028 1028 def _changectx(self):
1029 1029 try:
1030 1030 return changectx(self._repo, self._changeid)
1031 1031 except error.FilteredRepoLookupError:
1032 1032 # Linkrev may point to any revision in the repository. When the
1033 1033 # repository is filtered this may lead to `filectx` trying to build
1034 1034 # `changectx` for a filtered revision. In such a case we fall back to
1035 1035 # creating `changectx` on the unfiltered version of the repository.
1036 1036 # This fallback should not be an issue because `changectx` from
1037 1037 # `filectx` are not used in complex operations that care about
1038 1038 # filtering.
1039 1039 #
1040 1040 # This fallback is a cheap and dirty fix that prevents several
1041 1041 # crashes. It does not ensure the behavior is correct. However the
1042 1042 # behavior was not correct before filtering either and "incorrect
1043 1043 # behavior" is seen as better than "crash"
1044 1044 #
1045 1045 # Linkrevs have several serious troubles with filtering that are
1046 1046 # complicated to solve. Proper handling of the issue here should be
1047 1047 # considered when solving the linkrev issues is on the table.
1048 1048 return changectx(self._repo.unfiltered(), self._changeid)
1049 1049
1050 1050 def filectx(self, fileid, changeid=None):
1051 1051 '''opens an arbitrary revision of the file without
1052 1052 opening a new filelog'''
1053 1053 return filectx(self._repo, self._path, fileid=fileid,
1054 1054 filelog=self._filelog, changeid=changeid)
1055 1055
1056 1056 def data(self):
1057 1057 try:
1058 1058 return self._filelog.read(self._filenode)
1059 1059 except error.CensoredNodeError:
1060 1060 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1061 1061 return ""
1062 1062 raise util.Abort(_("censored node: %s") % short(self._filenode),
1063 1063 hint=_("set censor.policy to ignore errors"))
1064 1064
1065 1065 def size(self):
1066 1066 return self._filelog.size(self._filerev)
1067 1067
1068 1068 def renamed(self):
1069 1069 """check if file was actually renamed in this changeset revision
1070 1070
1071 1071 If a rename is logged in the file revision, we report a copy for the
1072 1072 changeset only if the file revision's linkrev points back to the changeset
1073 1073 in question or both changeset parents contain different file revisions.
1074 1074 """
1075 1075
1076 1076 renamed = self._filelog.renamed(self._filenode)
1077 1077 if not renamed:
1078 1078 return renamed
1079 1079
1080 1080 if self.rev() == self.linkrev():
1081 1081 return renamed
1082 1082
1083 1083 name = self.path()
1084 1084 fnode = self._filenode
1085 1085 for p in self._changectx.parents():
1086 1086 try:
1087 1087 if fnode == p.filenode(name):
1088 1088 return None
1089 1089 except error.LookupError:
1090 1090 pass
1091 1091 return renamed
1092 1092
1093 1093 def children(self):
1094 1094 # hard for renames
1095 1095 c = self._filelog.children(self._filenode)
1096 1096 return [filectx(self._repo, self._path, fileid=x,
1097 1097 filelog=self._filelog) for x in c]
1098 1098
1099 1099 class committablectx(basectx):
1100 1100 """A committablectx object provides common functionality for a context that
1101 1101 wants the ability to commit, e.g. workingctx or memctx."""
1102 1102 def __init__(self, repo, text="", user=None, date=None, extra=None,
1103 1103 changes=None):
1104 1104 self._repo = repo
1105 1105 self._rev = None
1106 1106 self._node = None
1107 1107 self._text = text
1108 1108 if date:
1109 1109 self._date = util.parsedate(date)
1110 1110 if user:
1111 1111 self._user = user
1112 1112 if changes:
1113 1113 self._status = changes
1114 1114
1115 1115 self._extra = {}
1116 1116 if extra:
1117 1117 self._extra = extra.copy()
1118 1118 if 'branch' not in self._extra:
1119 1119 try:
1120 1120 branch = encoding.fromlocal(self._repo.dirstate.branch())
1121 1121 except UnicodeDecodeError:
1122 1122 raise util.Abort(_('branch name not in UTF-8!'))
1123 1123 self._extra['branch'] = branch
1124 1124 if self._extra['branch'] == '':
1125 1125 self._extra['branch'] = 'default'
1126 1126
1127 1127 def __str__(self):
1128 1128 return str(self._parents[0]) + "+"
1129 1129
1130 1130 def __nonzero__(self):
1131 1131 return True
1132 1132
1133 1133 def _buildflagfunc(self):
1134 1134 # Create a fallback function for getting file flags when the
1135 1135 # filesystem doesn't support them
1136 1136
1137 1137 copiesget = self._repo.dirstate.copies().get
1138 1138
1139 1139 if len(self._parents) < 2:
1140 1140 # when we have one parent, it's easy: copy from parent
1141 1141 man = self._parents[0].manifest()
1142 1142 def func(f):
1143 1143 f = copiesget(f, f)
1144 1144 return man.flags(f)
1145 1145 else:
1146 1146 # merges are tricky: we try to reconstruct the unstored
1147 1147 # result from the merge (issue1802)
1148 1148 p1, p2 = self._parents
1149 1149 pa = p1.ancestor(p2)
1150 1150 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1151 1151
1152 1152 def func(f):
1153 1153 f = copiesget(f, f) # may be wrong for merges with copies
1154 1154 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1155 1155 if fl1 == fl2:
1156 1156 return fl1
1157 1157 if fl1 == fla:
1158 1158 return fl2
1159 1159 if fl2 == fla:
1160 1160 return fl1
1161 1161 return '' # punt for conflicts
1162 1162
1163 1163 return func
1164 1164
1165 1165 @propertycache
1166 1166 def _flagfunc(self):
1167 1167 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1168 1168
1169 1169 @propertycache
1170 1170 def _manifest(self):
1171 1171 """generate a manifest corresponding to the values in self._status
1172 1172
1173 1173 This reuses the file nodeid from the parent, but we append an extra letter
1174 1174 when modified. Modified files get an extra 'm' while added files get
1175 1175 an extra 'a'. This is used by manifest merge to see that files
1176 1176 are different and by update logic to avoid deleting newly added files.
1177 1177 """
1178 1178
1179 1179 man1 = self._parents[0].manifest()
1180 1180 man = man1.copy()
1181 1181 if len(self._parents) > 1:
1182 1182 man2 = self.p2().manifest()
1183 1183 def getman(f):
1184 1184 if f in man1:
1185 1185 return man1
1186 1186 return man2
1187 1187 else:
1188 1188 getman = lambda f: man1
1189 1189
1190 1190 copied = self._repo.dirstate.copies()
1191 1191 ff = self._flagfunc
1192 1192 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1193 1193 for f in l:
1194 1194 orig = copied.get(f, f)
1195 1195 man[f] = getman(orig).get(orig, nullid) + i
1196 1196 try:
1197 1197 man.setflag(f, ff(f))
1198 1198 except OSError:
1199 1199 pass
1200 1200
1201 1201 for f in self._status.deleted + self._status.removed:
1202 1202 if f in man:
1203 1203 del man[f]
1204 1204
1205 1205 return man
1206 1206
1207 1207 @propertycache
1208 1208 def _status(self):
1209 1209 return self._repo.status()
1210 1210
1211 1211 @propertycache
1212 1212 def _user(self):
1213 1213 return self._repo.ui.username()
1214 1214
1215 1215 @propertycache
1216 1216 def _date(self):
1217 1217 return util.makedate()
1218 1218
1219 1219 def subrev(self, subpath):
1220 1220 return None
1221 1221
1222 1222 def manifestnode(self):
1223 1223 return None
1224 1224 def user(self):
1225 1225 return self._user or self._repo.ui.username()
1226 1226 def date(self):
1227 1227 return self._date
1228 1228 def description(self):
1229 1229 return self._text
1230 1230 def files(self):
1231 1231 return sorted(self._status.modified + self._status.added +
1232 1232 self._status.removed)
1233 1233
1234 1234 def modified(self):
1235 1235 return self._status.modified
1236 1236 def added(self):
1237 1237 return self._status.added
1238 1238 def removed(self):
1239 1239 return self._status.removed
1240 1240 def deleted(self):
1241 1241 return self._status.deleted
1242 1242 def branch(self):
1243 1243 return encoding.tolocal(self._extra['branch'])
1244 1244 def closesbranch(self):
1245 1245 return 'close' in self._extra
1246 1246 def extra(self):
1247 1247 return self._extra
1248 1248
1249 1249 def tags(self):
1250 1250 return []
1251 1251
1252 1252 def bookmarks(self):
1253 1253 b = []
1254 1254 for p in self.parents():
1255 1255 b.extend(p.bookmarks())
1256 1256 return b
1257 1257
1258 1258 def phase(self):
1259 1259 phase = phases.draft # default phase to draft
1260 1260 for p in self.parents():
1261 1261 phase = max(phase, p.phase())
1262 1262 return phase
1263 1263
1264 1264 def hidden(self):
1265 1265 return False
1266 1266
1267 1267 def children(self):
1268 1268 return []
1269 1269
1270 1270 def flags(self, path):
1271 1271 if '_manifest' in self.__dict__:
1272 1272 try:
1273 1273 return self._manifest.flags(path)
1274 1274 except KeyError:
1275 1275 return ''
1276 1276
1277 1277 try:
1278 1278 return self._flagfunc(path)
1279 1279 except OSError:
1280 1280 return ''
1281 1281
1282 1282 def ancestor(self, c2):
1283 1283 """return the "best" ancestor context of self and c2"""
1284 1284 return self._parents[0].ancestor(c2) # punt on two parents for now
1285 1285
1286 1286 def walk(self, match):
1287 1287 '''Generates matching file names.'''
1288 1288 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1289 1289 True, False))
1290 1290
1291 1291 def matches(self, match):
1292 1292 return sorted(self._repo.dirstate.matches(match))
1293 1293
1294 1294 def ancestors(self):
1295 1295 for p in self._parents:
1296 1296 yield p
1297 1297 for a in self._repo.changelog.ancestors(
1298 1298 [p.rev() for p in self._parents]):
1299 1299 yield changectx(self._repo, a)
1300 1300
1301 1301 def markcommitted(self, node):
1302 1302 """Perform post-commit cleanup necessary after committing this ctx
1303 1303
1304 1304 Specifically, this updates backing stores this working context
1305 1305 wraps to reflect the fact that the changes reflected by this
1306 1306 workingctx have been committed. For example, it marks
1307 1307 modified and added files as normal in the dirstate.
1308 1308
1309 1309 """
1310 1310
1311 1311 self._repo.dirstate.beginparentchange()
1312 1312 for f in self.modified() + self.added():
1313 1313 self._repo.dirstate.normal(f)
1314 1314 for f in self.removed():
1315 1315 self._repo.dirstate.drop(f)
1316 1316 self._repo.dirstate.setparents(node)
1317 1317 self._repo.dirstate.endparentchange()
1318 1318
1319 1319 class workingctx(committablectx):
1320 1320 """A workingctx object makes access to data related to
1321 1321 the current working directory convenient.
1322 1322 date - any valid date string or (unixtime, offset), or None.
1323 1323 user - username string, or None.
1324 1324 extra - a dictionary of extra values, or None.
1325 1325 changes - a list of file lists as returned by localrepo.status()
1326 1326 or None to use the repository status.
1327 1327 """
1328 1328 def __init__(self, repo, text="", user=None, date=None, extra=None,
1329 1329 changes=None):
1330 1330 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1331 1331
1332 1332 def __iter__(self):
1333 1333 d = self._repo.dirstate
1334 1334 for f in d:
1335 1335 if d[f] != 'r':
1336 1336 yield f
1337 1337
1338 1338 def __contains__(self, key):
1339 1339 return self._repo.dirstate[key] not in "?r"
1340 1340
1341 1341 def hex(self):
1342 return "ff" * 20
1342 return hex(wdirid)
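                 # wdirid is the all-0xff placeholder node for the working
                 # directory, so hex(wdirid) still evaluates to "ff" * 20;
                 # only the magic string is replaced by the shared constant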
1343 1343
1344 1344 @propertycache
1345 1345 def _parents(self):
1346 1346 p = self._repo.dirstate.parents()
1347 1347 if p[1] == nullid:
1348 1348 p = p[:-1]
1349 1349 return [changectx(self._repo, x) for x in p]
1350 1350
1351 1351 def filectx(self, path, filelog=None):
1352 1352 """get a file context from the working directory"""
1353 1353 return workingfilectx(self._repo, path, workingctx=self,
1354 1354 filelog=filelog)
1355 1355
1356 1356 def dirty(self, missing=False, merge=True, branch=True):
1357 1357 "check whether a working directory is modified"
1358 1358 # check subrepos first
1359 1359 for s in sorted(self.substate):
1360 1360 if self.sub(s).dirty():
1361 1361 return True
1362 1362 # check current working dir
1363 1363 return ((merge and self.p2()) or
1364 1364 (branch and self.branch() != self.p1().branch()) or
1365 1365 self.modified() or self.added() or self.removed() or
1366 1366 (missing and self.deleted()))
1367 1367
1368 1368 def add(self, list, prefix=""):
1369 1369 join = lambda f: os.path.join(prefix, f)
1370 1370 wlock = self._repo.wlock()
1371 1371 ui, ds = self._repo.ui, self._repo.dirstate
1372 1372 try:
1373 1373 rejected = []
1374 1374 lstat = self._repo.wvfs.lstat
1375 1375 for f in list:
1376 1376 scmutil.checkportable(ui, join(f))
1377 1377 try:
1378 1378 st = lstat(f)
1379 1379 except OSError:
1380 1380 ui.warn(_("%s does not exist!\n") % join(f))
1381 1381 rejected.append(f)
1382 1382 continue
1383 1383 if st.st_size > 10000000:
1384 1384 ui.warn(_("%s: up to %d MB of RAM may be required "
1385 1385 "to manage this file\n"
1386 1386 "(use 'hg revert %s' to cancel the "
1387 1387 "pending addition)\n")
1388 1388 % (f, 3 * st.st_size // 1000000, join(f)))
1389 1389 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1390 1390 ui.warn(_("%s not added: only files and symlinks "
1391 1391 "supported currently\n") % join(f))
1392 1392 rejected.append(f)
1393 1393 elif ds[f] in 'amn':
1394 1394 ui.warn(_("%s already tracked!\n") % join(f))
1395 1395 elif ds[f] == 'r':
1396 1396 ds.normallookup(f)
1397 1397 else:
1398 1398 ds.add(f)
1399 1399 return rejected
1400 1400 finally:
1401 1401 wlock.release()
1402 1402
1403 1403 def forget(self, files, prefix=""):
1404 1404 join = lambda f: os.path.join(prefix, f)
1405 1405 wlock = self._repo.wlock()
1406 1406 try:
1407 1407 rejected = []
1408 1408 for f in files:
1409 1409 if f not in self._repo.dirstate:
1410 1410 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1411 1411 rejected.append(f)
1412 1412 elif self._repo.dirstate[f] != 'a':
1413 1413 self._repo.dirstate.remove(f)
1414 1414 else:
1415 1415 self._repo.dirstate.drop(f)
1416 1416 return rejected
1417 1417 finally:
1418 1418 wlock.release()
1419 1419
1420 1420 def undelete(self, list):
1421 1421 pctxs = self.parents()
1422 1422 wlock = self._repo.wlock()
1423 1423 try:
1424 1424 for f in list:
1425 1425 if self._repo.dirstate[f] != 'r':
1426 1426 self._repo.ui.warn(_("%s not removed!\n") % f)
1427 1427 else:
1428 1428 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1429 1429 t = fctx.data()
1430 1430 self._repo.wwrite(f, t, fctx.flags())
1431 1431 self._repo.dirstate.normal(f)
1432 1432 finally:
1433 1433 wlock.release()
1434 1434
1435 1435 def copy(self, source, dest):
1436 1436 try:
1437 1437 st = self._repo.wvfs.lstat(dest)
1438 1438 except OSError as err:
1439 1439 if err.errno != errno.ENOENT:
1440 1440 raise
1441 1441 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1442 1442 return
1443 1443 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1444 1444 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1445 1445 "symbolic link\n") % dest)
1446 1446 else:
1447 1447 wlock = self._repo.wlock()
1448 1448 try:
1449 1449 if self._repo.dirstate[dest] in '?':
1450 1450 self._repo.dirstate.add(dest)
1451 1451 elif self._repo.dirstate[dest] in 'r':
1452 1452 self._repo.dirstate.normallookup(dest)
1453 1453 self._repo.dirstate.copy(source, dest)
1454 1454 finally:
1455 1455 wlock.release()
1456 1456
1457 1457 def match(self, pats=[], include=None, exclude=None, default='glob',
1458 1458 listsubrepos=False, badfn=None):
1459 1459 r = self._repo
1460 1460
1461 1461 # Only a case insensitive filesystem needs magic to translate user input
1462 1462 # to actual case in the filesystem.
1463 1463 if not util.checkcase(r.root):
1464 1464 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1465 1465 exclude, default, r.auditor, self,
1466 1466 listsubrepos=listsubrepos,
1467 1467 badfn=badfn)
1468 1468 return matchmod.match(r.root, r.getcwd(), pats,
1469 1469 include, exclude, default,
1470 1470 auditor=r.auditor, ctx=self,
1471 1471 listsubrepos=listsubrepos, badfn=badfn)
1472 1472
1473 1473 def _filtersuspectsymlink(self, files):
1474 1474 if not files or self._repo.dirstate._checklink:
1475 1475 return files
1476 1476
1477 1477 # Symlink placeholders may get non-symlink-like contents
1478 1478 # via user error or dereferencing by NFS or Samba servers,
1479 1479 # so we filter out any placeholders that don't look like a
1480 1480 # symlink
1481 1481 sane = []
1482 1482 for f in files:
1483 1483 if self.flags(f) == 'l':
1484 1484 d = self[f].data()
1485 1485 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1486 1486 self._repo.ui.debug('ignoring suspect symlink placeholder'
1487 1487 ' "%s"\n' % f)
1488 1488 continue
1489 1489 sane.append(f)
1490 1490 return sane
1491 1491
1492 1492 def _checklookup(self, files):
1493 1493 # check for any possibly clean files
1494 1494 if not files:
1495 1495 return [], []
1496 1496
1497 1497 modified = []
1498 1498 fixup = []
1499 1499 pctx = self._parents[0]
1500 1500 # do a full compare of any files that might have changed
1501 1501 for f in sorted(files):
1502 1502 if (f not in pctx or self.flags(f) != pctx.flags(f)
1503 1503 or pctx[f].cmp(self[f])):
1504 1504 modified.append(f)
1505 1505 else:
1506 1506 fixup.append(f)
1507 1507
1508 1508 # update dirstate for files that are actually clean
1509 1509 if fixup:
1510 1510 try:
1511 1511 # updating the dirstate is optional
1512 1512 # so we don't wait on the lock
1513 1513 # wlock can invalidate the dirstate, so cache normal _after_
1514 1514 # taking the lock
1515 1515 wlock = self._repo.wlock(False)
1516 1516 normal = self._repo.dirstate.normal
1517 1517 try:
1518 1518 for f in fixup:
1519 1519 normal(f)
1520 1520 finally:
1521 1521 wlock.release()
1522 1522 except error.LockError:
1523 1523 pass
1524 1524 return modified, fixup
1525 1525
1526 1526 def _manifestmatches(self, match, s):
1527 1527 """Slow path for workingctx
1528 1528
1529 1529 The fast path is when we compare the working directory to its parent
1530 1530 which means this function is comparing with a non-parent; therefore we
1531 1531 need to build a manifest and return what matches.
1532 1532 """
1533 1533 mf = self._repo['.']._manifestmatches(match, s)
1534 1534 for f in s.modified + s.added:
1535 1535 mf[f] = _newnode
1536 1536 mf.setflag(f, self.flags(f))
1537 1537 for f in s.removed:
1538 1538 if f in mf:
1539 1539 del mf[f]
1540 1540 return mf
1541 1541
1542 1542 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1543 1543 unknown=False):
1544 1544 '''Gets the status from the dirstate -- internal use only.'''
1545 1545 listignored, listclean, listunknown = ignored, clean, unknown
1546 1546 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1547 1547 subrepos = []
1548 1548 if '.hgsub' in self:
1549 1549 subrepos = sorted(self.substate)
1550 1550 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1551 1551 listclean, listunknown)
1552 1552
1553 1553 # check for any possibly clean files
1554 1554 if cmp:
1555 1555 modified2, fixup = self._checklookup(cmp)
1556 1556 s.modified.extend(modified2)
1557 1557
1558 1558 # update dirstate for files that are actually clean
1559 1559 if fixup and listclean:
1560 1560 s.clean.extend(fixup)
1561 1561
1562 1562 if match.always():
1563 1563 # cache for performance
1564 1564 if s.unknown or s.ignored or s.clean:
1565 1565 # "_status" is cached with list*=False in the normal route
1566 1566 self._status = scmutil.status(s.modified, s.added, s.removed,
1567 1567 s.deleted, [], [], [])
1568 1568 else:
1569 1569 self._status = s
1570 1570
1571 1571 return s
1572 1572
1573 1573 def _buildstatus(self, other, s, match, listignored, listclean,
1574 1574 listunknown):
1575 1575 """build a status with respect to another context
1576 1576
1577 1577 This includes logic for maintaining the fast path of status when
1578 1578 comparing the working directory against its parent, which is to skip
1579 1579 building a new manifest if self (working directory) is not comparing
1580 1580 against its parent (repo['.']).
1581 1581 """
1582 1582 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1583 1583 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1584 1584 # might have accidentally ended up with the entire contents of the file
1585 1585 # they are supposed to be linking to.
1586 1586 s.modified[:] = self._filtersuspectsymlink(s.modified)
1587 1587 if other != self._repo['.']:
1588 1588 s = super(workingctx, self)._buildstatus(other, s, match,
1589 1589 listignored, listclean,
1590 1590 listunknown)
1591 1591 return s
1592 1592
1593 1593 def _matchstatus(self, other, match):
1594 1594 """override the match method with a filter for directory patterns
1595 1595
1596 1596 We use inheritance to customize the match.bad method only in the case
1597 1597 of workingctx, since it applies only to the working directory when
1598 1598 comparing against the parent changeset.
1599 1599
1600 1600 If we aren't comparing against the working directory's parent, then we
1601 1601 just use the default match object sent to us.
1602 1602 """
1603 1603 superself = super(workingctx, self)
1604 1604 match = superself._matchstatus(other, match)
1605 1605 if other != self._repo['.']:
1606 1606 def bad(f, msg):
1607 1607 # 'f' may be a directory pattern from 'match.files()',
1608 1608 # so 'f not in ctx1' is not enough
1609 1609 if f not in other and not other.hasdir(f):
1610 1610 self._repo.ui.warn('%s: %s\n' %
1611 1611 (self._repo.dirstate.pathto(f), msg))
1612 1612 match.bad = bad
1613 1613 return match
1614 1614
1615 1615 class committablefilectx(basefilectx):
1616 1616 """A committablefilectx provides common functionality for a file context
1617 1617 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1618 1618 def __init__(self, repo, path, filelog=None, ctx=None):
1619 1619 self._repo = repo
1620 1620 self._path = path
1621 1621 self._changeid = None
1622 1622 self._filerev = self._filenode = None
1623 1623
1624 1624 if filelog is not None:
1625 1625 self._filelog = filelog
1626 1626 if ctx:
1627 1627 self._changectx = ctx
1628 1628
1629 1629 def __nonzero__(self):
1630 1630 return True
1631 1631
1632 1632 def linkrev(self):
1633 1633 # linked to self._changectx no matter if file is modified or not
1634 1634 return self.rev()
1635 1635
1636 1636 def parents(self):
1637 1637 '''return parent filectxs, following copies if necessary'''
1638 1638 def filenode(ctx, path):
1639 1639 return ctx._manifest.get(path, nullid)
1640 1640
1641 1641 path = self._path
1642 1642 fl = self._filelog
1643 1643 pcl = self._changectx._parents
1644 1644 renamed = self.renamed()
1645 1645
1646 1646 if renamed:
1647 1647 pl = [renamed + (None,)]
1648 1648 else:
1649 1649 pl = [(path, filenode(pcl[0], path), fl)]
1650 1650
1651 1651 for pc in pcl[1:]:
1652 1652 pl.append((path, filenode(pc, path), fl))
1653 1653
1654 1654 return [self._parentfilectx(p, fileid=n, filelog=l)
1655 1655 for p, n, l in pl if n != nullid]
1656 1656
1657 1657 def children(self):
1658 1658 return []
1659 1659
1660 1660 class workingfilectx(committablefilectx):
1661 1661 """A workingfilectx object makes access to data related to a particular
1662 1662 file in the working directory convenient."""
1663 1663 def __init__(self, repo, path, filelog=None, workingctx=None):
1664 1664 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1665 1665
1666 1666 @propertycache
1667 1667 def _changectx(self):
1668 1668 return workingctx(self._repo)
1669 1669
1670 1670 def data(self):
1671 1671 return self._repo.wread(self._path)
1672 1672 def renamed(self):
1673 1673 rp = self._repo.dirstate.copied(self._path)
1674 1674 if not rp:
1675 1675 return None
1676 1676 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1677 1677
1678 1678 def size(self):
1679 1679 return self._repo.wvfs.lstat(self._path).st_size
1680 1680 def date(self):
1681 1681 t, tz = self._changectx.date()
1682 1682 try:
1683 1683 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1684 1684 except OSError as err:
1685 1685 if err.errno != errno.ENOENT:
1686 1686 raise
1687 1687 return (t, tz)
1688 1688
1689 1689 def cmp(self, fctx):
1690 1690 """compare with other file context
1691 1691
1692 1692 returns True if different from fctx.
1693 1693 """
1694 1694 # fctx should be a filectx (not a workingfilectx)
1695 1695 # invert comparison to reuse the same code path
1696 1696 return fctx.cmp(self)
1697 1697
1698 1698 def remove(self, ignoremissing=False):
1699 1699 """wraps unlink for a repo's working directory"""
1700 1700 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1701 1701
1702 1702 def write(self, data, flags):
1703 1703 """wraps repo.wwrite"""
1704 1704 self._repo.wwrite(self._path, data, flags)
1705 1705
1706 1706 class workingcommitctx(workingctx):
1707 1707 """A workingcommitctx object makes access to data related to
1708 1708 the revision being committed convenient.
1709 1709
1710 1710 This hides changes in the working directory that aren't
1711 1711 being committed in this context.
1712 1712 """
1713 1713 def __init__(self, repo, changes,
1714 1714 text="", user=None, date=None, extra=None):
1715 1715 super(workingctx, self).__init__(repo, text, user, date, extra,
1716 1716 changes)
1717 1717
1718 1718 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1719 1719 unknown=False):
1720 1720 """Return matched files only in ``self._status``
1721 1721
1722 1722 Uncommitted files appear "clean" via this context, even if
1723 1723 they aren't actually so in the working directory.
1724 1724 """
1725 1725 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1726 1726 if clean:
1727 1727 clean = [f for f in self._manifest if f not in self._changedset]
1728 1728 else:
1729 1729 clean = []
1730 1730 return scmutil.status([f for f in self._status.modified if match(f)],
1731 1731 [f for f in self._status.added if match(f)],
1732 1732 [f for f in self._status.removed if match(f)],
1733 1733 [], [], [], clean)
1734 1734
1735 1735 @propertycache
1736 1736 def _changedset(self):
1737 1737 """Return the set of files changed in this context
1738 1738 """
1739 1739 changed = set(self._status.modified)
1740 1740 changed.update(self._status.added)
1741 1741 changed.update(self._status.removed)
1742 1742 return changed
1743 1743
1744 1744 class memctx(committablectx):
1745 1745 """Use memctx to perform in-memory commits via localrepo.commitctx().
1746 1746
1747 1747 Revision information is supplied at initialization time, while
1748 1748 related file data is made available through a callback
1749 1749 mechanism. 'repo' is the current localrepo, 'parents' is a
1750 1750 sequence of two parent revision identifiers (pass None for every
1751 1751 missing parent), 'text' is the commit message and 'files' lists
1752 1752 names of files touched by the revision (normalized and relative to
1753 1753 repository root).
1754 1754
1755 1755 filectxfn(repo, memctx, path) is a callable receiving the
1756 1756 repository, the current memctx object and the normalized path of
1757 1757 the requested file, relative to the repository root. It is fired by
1758 1758 the commit function for every file in 'files', but the order of
1759 1759 calls is undefined. If the file is available in the revision being
1760 1760 committed (updated or added), filectxfn returns a memfilectx
1761 1761 object. If the file was removed, filectxfn raises an
1762 1762 IOError. Moved files are represented by marking the source file
1763 1763 removed and the new file added with copy information (see
1764 1764 memfilectx).
1765 1765
1766 1766 'user' is the committer name and defaults to the current
1767 1767 repository username, 'date' is the commit date in any format
1768 1768 supported by util.parsedate() and defaults to the current date, and
1769 1769 'extra' is a dictionary of metadata or is left empty.
1770 1770 """
1771 1771
1772 1772 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1773 1773 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1774 1774 # this field to determine what to do in filectxfn.
1775 1775 _returnnoneformissingfiles = True
1776 1776
1777 1777 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1778 1778 date=None, extra=None, editor=False):
1779 1779 super(memctx, self).__init__(repo, text, user, date, extra)
1780 1780 self._rev = None
1781 1781 self._node = None
1782 1782 parents = [(p or nullid) for p in parents]
1783 1783 p1, p2 = parents
1784 1784 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1785 1785 files = sorted(set(files))
1786 1786 self._files = files
1787 1787 self.substate = {}
1788 1788
1789 1789 # if store is not callable, wrap it in a function
1790 1790 if not callable(filectxfn):
1791 1791 def getfilectx(repo, memctx, path):
1792 1792 fctx = filectxfn[path]
1793 1793 # this is weird but apparently we only keep track of one parent
1794 1794 # (why not only store that instead of a tuple?)
1795 1795 copied = fctx.renamed()
1796 1796 if copied:
1797 1797 copied = copied[0]
1798 1798 return memfilectx(repo, path, fctx.data(),
1799 1799 islink=fctx.islink(), isexec=fctx.isexec(),
1800 1800 copied=copied, memctx=memctx)
1801 1801 self._filectxfn = getfilectx
1802 1802 else:
1803 1803 # "util.cachefunc" reduces invocation of possibly expensive
1804 1804 # "filectxfn" for performance (e.g. converting from another VCS)
1805 1805 self._filectxfn = util.cachefunc(filectxfn)
1806 1806
1807 1807 if extra:
1808 1808 self._extra = extra.copy()
1809 1809 else:
1810 1810 self._extra = {}
1811 1811
1812 1812 if self._extra.get('branch', '') == '':
1813 1813 self._extra['branch'] = 'default'
1814 1814
1815 1815 if editor:
1816 1816 self._text = editor(self._repo, self, [])
1817 1817 self._repo.savecommitmessage(self._text)
1818 1818
1819 1819 def filectx(self, path, filelog=None):
1820 1820 """get a file context from the working directory
1821 1821
1822 1822 Returns None if file doesn't exist and should be removed."""
1823 1823 return self._filectxfn(self._repo, self, path)
1824 1824
1825 1825 def commit(self):
1826 1826 """commit context to the repo"""
1827 1827 return self._repo.commitctx(self)
1828 1828
1829 1829 @propertycache
1830 1830 def _manifest(self):
1831 1831 """generate a manifest based on the return values of filectxfn"""
1832 1832
1833 1833 # keep this simple for now; just worry about p1
1834 1834 pctx = self._parents[0]
1835 1835 man = pctx.manifest().copy()
1836 1836
1837 1837 for f in self._status.modified:
1838 1838 p1node = nullid
1839 1839 p2node = nullid
1840 1840 p = pctx[f].parents() # if file isn't in pctx, check p2?
1841 1841 if len(p) > 0:
1842 1842 p1node = p[0].node()
1843 1843 if len(p) > 1:
1844 1844 p2node = p[1].node()
1845 1845 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1846 1846
1847 1847 for f in self._status.added:
1848 1848 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1849 1849
1850 1850 for f in self._status.removed:
1851 1851 if f in man:
1852 1852 del man[f]
1853 1853
1854 1854 return man
1855 1855
1856 1856 @propertycache
1857 1857 def _status(self):
1858 1858 """Calculate exact status from ``files`` specified at construction
1859 1859 """
1860 1860 man1 = self.p1().manifest()
1861 1861 p2 = self._parents[1]
1862 1862 # "1 < len(self._parents)" can't be used for checking
1863 1863 # existence of the 2nd parent, because "memctx._parents" is
1864 1864 # explicitly initialized by the list, of which length is 2.
1865 1865 if p2.node() != nullid:
1866 1866 man2 = p2.manifest()
1867 1867 managing = lambda f: f in man1 or f in man2
1868 1868 else:
1869 1869 managing = lambda f: f in man1
1870 1870
1871 1871 modified, added, removed = [], [], []
1872 1872 for f in self._files:
1873 1873 if not managing(f):
1874 1874 added.append(f)
1875 1875 elif self[f]:
1876 1876 modified.append(f)
1877 1877 else:
1878 1878 removed.append(f)
1879 1879
1880 1880 return scmutil.status(modified, added, removed, [], [], [], [])
1881 1881
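# --- Illustrative usage sketch (editorial addition, not part of context.py) ---
# The memctx docstring above describes the in-memory commit API. The sketch
# below shows how an extension *might* drive it; the file names, contents and
# commit message are hypothetical, while memctx, memfilectx and
# repo.commitctx() are the real entry points documented above.
def _example_inmemory_commit(repo):
    def getfilectx(repo, memctx, path):
        if path == 'removed.txt':
            # honour the compatibility switch noted above: Mercurial <= 3.1
            # expects IOError for missing files, newer versions accept None
            if getattr(memctx, '_returnnoneformissingfiles', False):
                return None
            raise IOError(errno.ENOENT, '%s has been removed' % path)
        return memfilectx(repo, path, 'example contents\n', memctx=memctx)
    parents = (repo['.'].node(), None)  # a None parent is filled in with nullid
    ctx = memctx(repo, parents, 'example in-memory commit',
                 ['example.txt', 'removed.txt'], getfilectx)
    return repo.commitctx(ctx)
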
1882 1882 class memfilectx(committablefilectx):
1883 1883 """memfilectx represents an in-memory file to commit.
1884 1884
1885 1885 See memctx and committablefilectx for more details.
1886 1886 """
1887 1887 def __init__(self, repo, path, data, islink=False,
1888 1888 isexec=False, copied=None, memctx=None):
1889 1889 """
1890 1890 path is the normalized file path relative to repository root.
1891 1891 data is the file content as a string.
1892 1892 islink is True if the file is a symbolic link.
1893 1893 isexec is True if the file is executable.
1894 1894 copied is the source file path if current file was copied in the
1895 1895 revision being committed, or None."""
1896 1896 super(memfilectx, self).__init__(repo, path, None, memctx)
1897 1897 self._data = data
1898 1898 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1899 1899 self._copied = None
1900 1900 if copied:
1901 1901 self._copied = (copied, nullid)
1902 1902
1903 1903 def data(self):
1904 1904 return self._data
1905 1905 def size(self):
1906 1906 return len(self.data())
1907 1907 def flags(self):
1908 1908 return self._flags
1909 1909 def renamed(self):
1910 1910 return self._copied
1911 1911
1912 1912 def remove(self, ignoremissing=False):
1913 1913 """wraps unlink for a repo's working directory"""
1914 1914 # need to figure out what to do here
1915 1915 del self._changectx[self._path]
1916 1916
1917 1917 def write(self, data, flags):
1918 1918 """wraps repo.wwrite"""
1919 1919 self._data = data
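
# --- Illustrative sketch (editorial addition, not part of context.py) ---
# The memctx docstring above notes that moved files are represented by
# marking the source removed and adding the new file with copy information.
# A filectxfn along these lines could express such a rename; 'old.txt' and
# 'new.txt' are hypothetical paths, both of which would be listed in the
# 'files' argument given to memctx.
def _example_rename_filectx(repo, memctx, path):
    if path == 'old.txt':
        # the rename source is reported as removed (Mercurial > 3.1 style)
        return None
    # the destination carries the copy source so the rename is recorded
    return memfilectx(repo, path, 'moved contents\n',
                      copied='old.txt', memctx=memctx)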