context: override workingctx.hex() to avoid a crash...
Matt Harbison - r25590:183965a0 default
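
The patch below adds a hex() override to workingctx. As a rough illustration (not the actual Mercurial code): basectx.hex() returns hex(self.node()) (see the listing below), a working-directory context has no committed node (node() is None), and node.hex is binascii hexlify, so hexlifying None raises TypeError. The override sidesteps that by returning a placeholder hash of 40 "f" characters. The class names in this sketch are made up for illustration only:

# Minimal sketch of the crash and the fix; illustrative names, not hg's.
from binascii import hexlify

class fakebasectx(object):
    _node = None            # a working directory has no committed node
    def node(self):
        return self._node
    def hex(self):
        # hexlify(None) raises TypeError -- this is the crash the
        # patch avoids for working-directory contexts
        return hexlify(self.node())

class fakeworkingctx(fakebasectx):
    def hex(self):
        # report a placeholder hash instead of crashing
        return "ff" * 20

assert fakeworkingctx().hex() == "f" * 40

The actual change appears at new lines 1337-1338 of the diff, returning "ff" * 20 from workingctx.hex().
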
@@ -1,1912 +1,1915
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, short, hex, bin
9 9 from i18n import _
10 10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 11 import match as matchmod
12 12 import os, errno, stat
13 13 import obsolete as obsmod
14 14 import repoview
15 15 import fileset
16 16 import revlog
17 17
18 18 propertycache = util.propertycache
19 19
20 20 # Phony node value to stand-in for new files in some uses of
21 21 # manifests. Manifests support 21-byte hashes for nodes which are
22 22 # dirty in the working copy.
23 23 _newnode = '!' * 21
24 24
25 25 class basectx(object):
26 26 """A basectx object represents the common logic for its children:
27 27 changectx: read-only context that is already present in the repo,
28 28 workingctx: a context that represents the working directory and can
29 29 be committed,
30 30 memctx: a context that represents changes in-memory and can also
31 31 be committed."""
32 32 def __new__(cls, repo, changeid='', *args, **kwargs):
33 33 if isinstance(changeid, basectx):
34 34 return changeid
35 35
36 36 o = super(basectx, cls).__new__(cls)
37 37
38 38 o._repo = repo
39 39 o._rev = nullrev
40 40 o._node = nullid
41 41
42 42 return o
43 43
44 44 def __str__(self):
45 45 return short(self.node())
46 46
47 47 def __int__(self):
48 48 return self.rev()
49 49
50 50 def __repr__(self):
51 51 return "<%s %s>" % (type(self).__name__, str(self))
52 52
53 53 def __eq__(self, other):
54 54 try:
55 55 return type(self) == type(other) and self._rev == other._rev
56 56 except AttributeError:
57 57 return False
58 58
59 59 def __ne__(self, other):
60 60 return not (self == other)
61 61
62 62 def __contains__(self, key):
63 63 return key in self._manifest
64 64
65 65 def __getitem__(self, key):
66 66 return self.filectx(key)
67 67
68 68 def __iter__(self):
69 69 return iter(self._manifest)
70 70
71 71 def _manifestmatches(self, match, s):
72 72 """generate a new manifest filtered by the match argument
73 73
74 74 This method is for internal use only and mainly exists to provide an
75 75 object oriented way for other contexts to customize the manifest
76 76 generation.
77 77 """
78 78 return self.manifest().matches(match)
79 79
80 80 def _matchstatus(self, other, match):
81 81 """return match.always if match is none
82 82
83 83 This internal method provides a way for child objects to override the
84 84 match operator.
85 85 """
86 86 return match or matchmod.always(self._repo.root, self._repo.getcwd())
87 87
88 88 def _buildstatus(self, other, s, match, listignored, listclean,
89 89 listunknown):
90 90 """build a status with respect to another context"""
91 91 # Load earliest manifest first for caching reasons. More specifically,
92 92 # if you have revisions 1000 and 1001, 1001 is probably stored as a
93 93 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
94 94 # 1000 and cache it so that when you read 1001, we just need to apply a
95 95 # delta to what's in the cache. So that's one full reconstruction + one
96 96 # delta application.
97 97 if self.rev() is not None and self.rev() < other.rev():
98 98 self.manifest()
99 99 mf1 = other._manifestmatches(match, s)
100 100 mf2 = self._manifestmatches(match, s)
101 101
102 102 modified, added = [], []
103 103 removed = []
104 104 clean = []
105 105 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
106 106 deletedset = set(deleted)
107 107 d = mf1.diff(mf2, clean=listclean)
108 108 for fn, value in d.iteritems():
109 109 if fn in deletedset:
110 110 continue
111 111 if value is None:
112 112 clean.append(fn)
113 113 continue
114 114 (node1, flag1), (node2, flag2) = value
115 115 if node1 is None:
116 116 added.append(fn)
117 117 elif node2 is None:
118 118 removed.append(fn)
119 119 elif node2 != _newnode:
120 120 # The file was not a new file in mf2, so an entry
121 121 # from diff is really a difference.
122 122 modified.append(fn)
123 123 elif self[fn].cmp(other[fn]):
124 124 # node2 was newnode, but the working file doesn't
125 125 # match the one in mf1.
126 126 modified.append(fn)
127 127 else:
128 128 clean.append(fn)
129 129
130 130 if removed:
131 131 # need to filter files if they are already reported as removed
132 132 unknown = [fn for fn in unknown if fn not in mf1]
133 133 ignored = [fn for fn in ignored if fn not in mf1]
134 134 # if they're deleted, don't report them as removed
135 135 removed = [fn for fn in removed if fn not in deletedset]
136 136
137 137 return scmutil.status(modified, added, removed, deleted, unknown,
138 138 ignored, clean)
139 139
140 140 @propertycache
141 141 def substate(self):
142 142 return subrepo.state(self, self._repo.ui)
143 143
144 144 def subrev(self, subpath):
145 145 return self.substate[subpath][1]
146 146
147 147 def rev(self):
148 148 return self._rev
149 149 def node(self):
150 150 return self._node
151 151 def hex(self):
152 152 return hex(self.node())
153 153 def manifest(self):
154 154 return self._manifest
155 155 def repo(self):
156 156 return self._repo
157 157 def phasestr(self):
158 158 return phases.phasenames[self.phase()]
159 159 def mutable(self):
160 160 return self.phase() > phases.public
161 161
162 162 def getfileset(self, expr):
163 163 return fileset.getfileset(self, expr)
164 164
165 165 def obsolete(self):
166 166 """True if the changeset is obsolete"""
167 167 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
168 168
169 169 def extinct(self):
170 170 """True if the changeset is extinct"""
171 171 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
172 172
173 173 def unstable(self):
174 174 """True if the changeset is not obsolete but it's ancestor are"""
175 175 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
176 176
177 177 def bumped(self):
178 178 """True if the changeset try to be a successor of a public changeset
179 179
180 180 Only non-public and non-obsolete changesets may be bumped.
181 181 """
182 182 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
183 183
184 184 def divergent(self):
185 185 """Is a successors of a changeset with multiple possible successors set
186 186
187 187 Only non-public and non-obsolete changesets may be divergent.
188 188 """
189 189 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
190 190
191 191 def troubled(self):
192 192 """True if the changeset is either unstable, bumped or divergent"""
193 193 return self.unstable() or self.bumped() or self.divergent()
194 194
195 195 def troubles(self):
196 196 """return the list of troubles affecting this changesets.
197 197
198 198 Troubles are returned as strings. Possible values are:
199 199 - unstable,
200 200 - bumped,
201 201 - divergent.
202 202 """
203 203 troubles = []
204 204 if self.unstable():
205 205 troubles.append('unstable')
206 206 if self.bumped():
207 207 troubles.append('bumped')
208 208 if self.divergent():
209 209 troubles.append('divergent')
210 210 return troubles
211 211
212 212 def parents(self):
213 213 """return contexts for each parent changeset"""
214 214 return self._parents
215 215
216 216 def p1(self):
217 217 return self._parents[0]
218 218
219 219 def p2(self):
220 220 if len(self._parents) == 2:
221 221 return self._parents[1]
222 222 return changectx(self._repo, -1)
223 223
224 224 def _fileinfo(self, path):
225 225 if '_manifest' in self.__dict__:
226 226 try:
227 227 return self._manifest[path], self._manifest.flags(path)
228 228 except KeyError:
229 229 raise error.ManifestLookupError(self._node, path,
230 230 _('not found in manifest'))
231 231 if '_manifestdelta' in self.__dict__ or path in self.files():
232 232 if path in self._manifestdelta:
233 233 return (self._manifestdelta[path],
234 234 self._manifestdelta.flags(path))
235 235 node, flag = self._repo.manifest.find(self._changeset[0], path)
236 236 if not node:
237 237 raise error.ManifestLookupError(self._node, path,
238 238 _('not found in manifest'))
239 239
240 240 return node, flag
241 241
242 242 def filenode(self, path):
243 243 return self._fileinfo(path)[0]
244 244
245 245 def flags(self, path):
246 246 try:
247 247 return self._fileinfo(path)[1]
248 248 except error.LookupError:
249 249 return ''
250 250
251 251 def sub(self, path):
252 252 return subrepo.subrepo(self, path)
253 253
254 254 def nullsub(self, path, pctx):
255 255 return subrepo.nullsubrepo(self, path, pctx)
256 256
257 257 def match(self, pats=[], include=None, exclude=None, default='glob',
258 258 listsubrepos=False, badfn=None):
259 259 r = self._repo
260 260 return matchmod.match(r.root, r.getcwd(), pats,
261 261 include, exclude, default,
262 262 auditor=r.auditor, ctx=self,
263 263 listsubrepos=listsubrepos, badfn=badfn)
264 264
265 265 def diff(self, ctx2=None, match=None, **opts):
266 266 """Returns a diff generator for the given contexts and matcher"""
267 267 if ctx2 is None:
268 268 ctx2 = self.p1()
269 269 if ctx2 is not None:
270 270 ctx2 = self._repo[ctx2]
271 271 diffopts = patch.diffopts(self._repo.ui, opts)
272 272 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
273 273
274 274 def dirs(self):
275 275 return self._manifest.dirs()
276 276
277 277 def hasdir(self, dir):
278 278 return self._manifest.hasdir(dir)
279 279
280 280 def dirty(self, missing=False, merge=True, branch=True):
281 281 return False
282 282
283 283 def status(self, other=None, match=None, listignored=False,
284 284 listclean=False, listunknown=False, listsubrepos=False):
285 285 """return status of files between two nodes or node and working
286 286 directory.
287 287
288 288 If other is None, compare this node with working directory.
289 289
290 290 returns (modified, added, removed, deleted, unknown, ignored, clean)
291 291 """
292 292
293 293 ctx1 = self
294 294 ctx2 = self._repo[other]
295 295
296 296 # This next code block is, admittedly, fragile logic that tests for
297 297 # reversing the contexts and wouldn't need to exist if it weren't for
298 298 # the fast (and common) code path of comparing the working directory
299 299 # with its first parent.
300 300 #
301 301 # What we're aiming for here is the ability to call:
302 302 #
303 303 # workingctx.status(parentctx)
304 304 #
305 305 # If we always built the manifest for each context and compared those,
306 306 # then we'd be done. But the special case of the above call means we
307 307 # just copy the manifest of the parent.
308 308 reversed = False
309 309 if (not isinstance(ctx1, changectx)
310 310 and isinstance(ctx2, changectx)):
311 311 reversed = True
312 312 ctx1, ctx2 = ctx2, ctx1
313 313
314 314 match = ctx2._matchstatus(ctx1, match)
315 315 r = scmutil.status([], [], [], [], [], [], [])
316 316 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
317 317 listunknown)
318 318
319 319 if reversed:
320 320 # Reverse added and removed. Clear deleted, unknown and ignored as
321 321 # these make no sense to reverse.
322 322 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
323 323 r.clean)
324 324
325 325 if listsubrepos:
326 326 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
327 327 rev2 = ctx2.subrev(subpath)
328 328 try:
329 329 submatch = matchmod.narrowmatcher(subpath, match)
330 330 s = sub.status(rev2, match=submatch, ignored=listignored,
331 331 clean=listclean, unknown=listunknown,
332 332 listsubrepos=True)
333 333 for rfiles, sfiles in zip(r, s):
334 334 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
335 335 except error.LookupError:
336 336 self._repo.ui.status(_("skipping missing "
337 337 "subrepository: %s\n") % subpath)
338 338
339 339 for l in r:
340 340 l.sort()
341 341
342 342 return r
343 343
344 344
345 345 def makememctx(repo, parents, text, user, date, branch, files, store,
346 346 editor=None, extra=None):
347 347 def getfilectx(repo, memctx, path):
348 348 data, mode, copied = store.getfile(path)
349 349 if data is None:
350 350 return None
351 351 islink, isexec = mode
352 352 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
353 353 copied=copied, memctx=memctx)
354 354 if extra is None:
355 355 extra = {}
356 356 if branch:
357 357 extra['branch'] = encoding.fromlocal(branch)
358 358 ctx = memctx(repo, parents, text, files, getfilectx, user,
359 359 date, extra, editor)
360 360 return ctx
361 361
362 362 class changectx(basectx):
363 363 """A changecontext object makes access to data related to a particular
364 364 changeset convenient. It represents a read-only context already present in
365 365 the repo."""
366 366 def __init__(self, repo, changeid=''):
367 367 """changeid is a revision number, node, or tag"""
368 368
369 369 # since basectx.__new__ already took care of copying the object, we
370 370 # don't need to do anything in __init__, so we just exit here
371 371 if isinstance(changeid, basectx):
372 372 return
373 373
374 374 if changeid == '':
375 375 changeid = '.'
376 376 self._repo = repo
377 377
378 378 try:
379 379 if isinstance(changeid, int):
380 380 self._node = repo.changelog.node(changeid)
381 381 self._rev = changeid
382 382 return
383 383 if isinstance(changeid, long):
384 384 changeid = str(changeid)
385 385 if changeid == 'null':
386 386 self._node = nullid
387 387 self._rev = nullrev
388 388 return
389 389 if changeid == 'tip':
390 390 self._node = repo.changelog.tip()
391 391 self._rev = repo.changelog.rev(self._node)
392 392 return
393 393 if changeid == '.' or changeid == repo.dirstate.p1():
394 394 # this is a hack to delay/avoid loading obsmarkers
395 395 # when we know that '.' won't be hidden
396 396 self._node = repo.dirstate.p1()
397 397 self._rev = repo.unfiltered().changelog.rev(self._node)
398 398 return
399 399 if len(changeid) == 20:
400 400 try:
401 401 self._node = changeid
402 402 self._rev = repo.changelog.rev(changeid)
403 403 return
404 404 except error.FilteredRepoLookupError:
405 405 raise
406 406 except LookupError:
407 407 pass
408 408
409 409 try:
410 410 r = int(changeid)
411 411 if str(r) != changeid:
412 412 raise ValueError
413 413 l = len(repo.changelog)
414 414 if r < 0:
415 415 r += l
416 416 if r < 0 or r >= l:
417 417 raise ValueError
418 418 self._rev = r
419 419 self._node = repo.changelog.node(r)
420 420 return
421 421 except error.FilteredIndexError:
422 422 raise
423 423 except (ValueError, OverflowError, IndexError):
424 424 pass
425 425
426 426 if len(changeid) == 40:
427 427 try:
428 428 self._node = bin(changeid)
429 429 self._rev = repo.changelog.rev(self._node)
430 430 return
431 431 except error.FilteredLookupError:
432 432 raise
433 433 except (TypeError, LookupError):
434 434 pass
435 435
436 436 # lookup bookmarks through the name interface
437 437 try:
438 438 self._node = repo.names.singlenode(repo, changeid)
439 439 self._rev = repo.changelog.rev(self._node)
440 440 return
441 441 except KeyError:
442 442 pass
443 443 except error.FilteredRepoLookupError:
444 444 raise
445 445 except error.RepoLookupError:
446 446 pass
447 447
448 448 self._node = repo.unfiltered().changelog._partialmatch(changeid)
449 449 if self._node is not None:
450 450 self._rev = repo.changelog.rev(self._node)
451 451 return
452 452
453 453 # lookup failed
454 454 # check if it might have come from damaged dirstate
455 455 #
456 456 # XXX we could avoid the unfiltered if we had a recognizable
457 457 # exception for filtered changeset access
458 458 if changeid in repo.unfiltered().dirstate.parents():
459 459 msg = _("working directory has unknown parent '%s'!")
460 460 raise error.Abort(msg % short(changeid))
461 461 try:
462 462 if len(changeid) == 20:
463 463 changeid = hex(changeid)
464 464 except TypeError:
465 465 pass
466 466 except (error.FilteredIndexError, error.FilteredLookupError,
467 467 error.FilteredRepoLookupError):
468 468 if repo.filtername.startswith('visible'):
469 469 msg = _("hidden revision '%s'") % changeid
470 470 hint = _('use --hidden to access hidden revisions')
471 471 raise error.FilteredRepoLookupError(msg, hint=hint)
472 472 msg = _("filtered revision '%s' (not in '%s' subset)")
473 473 msg %= (changeid, repo.filtername)
474 474 raise error.FilteredRepoLookupError(msg)
475 475 except IndexError:
476 476 pass
477 477 raise error.RepoLookupError(
478 478 _("unknown revision '%s'") % changeid)
479 479
480 480 def __hash__(self):
481 481 try:
482 482 return hash(self._rev)
483 483 except AttributeError:
484 484 return id(self)
485 485
486 486 def __nonzero__(self):
487 487 return self._rev != nullrev
488 488
489 489 @propertycache
490 490 def _changeset(self):
491 491 return self._repo.changelog.read(self.rev())
492 492
493 493 @propertycache
494 494 def _manifest(self):
495 495 return self._repo.manifest.read(self._changeset[0])
496 496
497 497 @propertycache
498 498 def _manifestdelta(self):
499 499 return self._repo.manifest.readdelta(self._changeset[0])
500 500
501 501 @propertycache
502 502 def _parents(self):
503 503 p = self._repo.changelog.parentrevs(self._rev)
504 504 if p[1] == nullrev:
505 505 p = p[:-1]
506 506 return [changectx(self._repo, x) for x in p]
507 507
508 508 def changeset(self):
509 509 return self._changeset
510 510 def manifestnode(self):
511 511 return self._changeset[0]
512 512
513 513 def user(self):
514 514 return self._changeset[1]
515 515 def date(self):
516 516 return self._changeset[2]
517 517 def files(self):
518 518 return self._changeset[3]
519 519 def description(self):
520 520 return self._changeset[4]
521 521 def branch(self):
522 522 return encoding.tolocal(self._changeset[5].get("branch"))
523 523 def closesbranch(self):
524 524 return 'close' in self._changeset[5]
525 525 def extra(self):
526 526 return self._changeset[5]
527 527 def tags(self):
528 528 return self._repo.nodetags(self._node)
529 529 def bookmarks(self):
530 530 return self._repo.nodebookmarks(self._node)
531 531 def phase(self):
532 532 return self._repo._phasecache.phase(self._repo, self._rev)
533 533 def hidden(self):
534 534 return self._rev in repoview.filterrevs(self._repo, 'visible')
535 535
536 536 def children(self):
537 537 """return contexts for each child changeset"""
538 538 c = self._repo.changelog.children(self._node)
539 539 return [changectx(self._repo, x) for x in c]
540 540
541 541 def ancestors(self):
542 542 for a in self._repo.changelog.ancestors([self._rev]):
543 543 yield changectx(self._repo, a)
544 544
545 545 def descendants(self):
546 546 for d in self._repo.changelog.descendants([self._rev]):
547 547 yield changectx(self._repo, d)
548 548
549 549 def filectx(self, path, fileid=None, filelog=None):
550 550 """get a file context from this changeset"""
551 551 if fileid is None:
552 552 fileid = self.filenode(path)
553 553 return filectx(self._repo, path, fileid=fileid,
554 554 changectx=self, filelog=filelog)
555 555
556 556 def ancestor(self, c2, warn=False):
557 557 """return the "best" ancestor context of self and c2
558 558
559 559 If there are multiple candidates, it will show a message and check
560 560 merge.preferancestor configuration before falling back to the
561 561 revlog ancestor."""
562 562 # deal with workingctxs
563 563 n2 = c2._node
564 564 if n2 is None:
565 565 n2 = c2._parents[0]._node
566 566 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
567 567 if not cahs:
568 568 anc = nullid
569 569 elif len(cahs) == 1:
570 570 anc = cahs[0]
571 571 else:
572 572 for r in self._repo.ui.configlist('merge', 'preferancestor'):
573 573 try:
574 574 ctx = changectx(self._repo, r)
575 575 except error.RepoLookupError:
576 576 continue
577 577 anc = ctx.node()
578 578 if anc in cahs:
579 579 break
580 580 else:
581 581 anc = self._repo.changelog.ancestor(self._node, n2)
582 582 if warn:
583 583 self._repo.ui.status(
584 584 (_("note: using %s as ancestor of %s and %s\n") %
585 585 (short(anc), short(self._node), short(n2))) +
586 586 ''.join(_(" alternatively, use --config "
587 587 "merge.preferancestor=%s\n") %
588 588 short(n) for n in sorted(cahs) if n != anc))
589 589 return changectx(self._repo, anc)
590 590
591 591 def descendant(self, other):
592 592 """True if other is descendant of this changeset"""
593 593 return self._repo.changelog.descendant(self._rev, other._rev)
594 594
595 595 def walk(self, match):
596 596 '''Generates matching file names.'''
597 597
598 598 # Wrap match.bad method to have message with nodeid
599 599 def bad(fn, msg):
600 600 # The manifest doesn't know about subrepos, so don't complain about
601 601 # paths into valid subrepos.
602 602 if any(fn == s or fn.startswith(s + '/')
603 603 for s in self.substate):
604 604 return
605 605 match.bad(fn, _('no such file in rev %s') % self)
606 606
607 607 m = matchmod.badmatch(match, bad)
608 608 return self._manifest.walk(m)
609 609
610 610 def matches(self, match):
611 611 return self.walk(match)
612 612
613 613 class basefilectx(object):
614 614 """A filecontext object represents the common logic for its children:
615 615 filectx: read-only access to a filerevision that is already present
616 616 in the repo,
617 617 workingfilectx: a filecontext that represents files from the working
618 618 directory,
619 619 memfilectx: a filecontext that represents files in-memory."""
620 620 def __new__(cls, repo, path, *args, **kwargs):
621 621 return super(basefilectx, cls).__new__(cls)
622 622
623 623 @propertycache
624 624 def _filelog(self):
625 625 return self._repo.file(self._path)
626 626
627 627 @propertycache
628 628 def _changeid(self):
629 629 if '_changeid' in self.__dict__:
630 630 return self._changeid
631 631 elif '_changectx' in self.__dict__:
632 632 return self._changectx.rev()
633 633 elif '_descendantrev' in self.__dict__:
634 634 # this file context was created from a revision with a known
635 635 # descendant, we can (lazily) correct for linkrev aliases
636 636 return self._adjustlinkrev(self._path, self._filelog,
637 637 self._filenode, self._descendantrev)
638 638 else:
639 639 return self._filelog.linkrev(self._filerev)
640 640
641 641 @propertycache
642 642 def _filenode(self):
643 643 if '_fileid' in self.__dict__:
644 644 return self._filelog.lookup(self._fileid)
645 645 else:
646 646 return self._changectx.filenode(self._path)
647 647
648 648 @propertycache
649 649 def _filerev(self):
650 650 return self._filelog.rev(self._filenode)
651 651
652 652 @propertycache
653 653 def _repopath(self):
654 654 return self._path
655 655
656 656 def __nonzero__(self):
657 657 try:
658 658 self._filenode
659 659 return True
660 660 except error.LookupError:
661 661 # file is missing
662 662 return False
663 663
664 664 def __str__(self):
665 665 return "%s@%s" % (self.path(), self._changectx)
666 666
667 667 def __repr__(self):
668 668 return "<%s %s>" % (type(self).__name__, str(self))
669 669
670 670 def __hash__(self):
671 671 try:
672 672 return hash((self._path, self._filenode))
673 673 except AttributeError:
674 674 return id(self)
675 675
676 676 def __eq__(self, other):
677 677 try:
678 678 return (type(self) == type(other) and self._path == other._path
679 679 and self._filenode == other._filenode)
680 680 except AttributeError:
681 681 return False
682 682
683 683 def __ne__(self, other):
684 684 return not (self == other)
685 685
686 686 def filerev(self):
687 687 return self._filerev
688 688 def filenode(self):
689 689 return self._filenode
690 690 def flags(self):
691 691 return self._changectx.flags(self._path)
692 692 def filelog(self):
693 693 return self._filelog
694 694 def rev(self):
695 695 return self._changeid
696 696 def linkrev(self):
697 697 return self._filelog.linkrev(self._filerev)
698 698 def node(self):
699 699 return self._changectx.node()
700 700 def hex(self):
701 701 return self._changectx.hex()
702 702 def user(self):
703 703 return self._changectx.user()
704 704 def date(self):
705 705 return self._changectx.date()
706 706 def files(self):
707 707 return self._changectx.files()
708 708 def description(self):
709 709 return self._changectx.description()
710 710 def branch(self):
711 711 return self._changectx.branch()
712 712 def extra(self):
713 713 return self._changectx.extra()
714 714 def phase(self):
715 715 return self._changectx.phase()
716 716 def phasestr(self):
717 717 return self._changectx.phasestr()
718 718 def manifest(self):
719 719 return self._changectx.manifest()
720 720 def changectx(self):
721 721 return self._changectx
722 722 def repo(self):
723 723 return self._repo
724 724
725 725 def path(self):
726 726 return self._path
727 727
728 728 def isbinary(self):
729 729 try:
730 730 return util.binary(self.data())
731 731 except IOError:
732 732 return False
733 733 def isexec(self):
734 734 return 'x' in self.flags()
735 735 def islink(self):
736 736 return 'l' in self.flags()
737 737
738 738 def cmp(self, fctx):
739 739 """compare with other file context
740 740
741 741 returns True if different than fctx.
742 742 """
743 743 if (fctx._filerev is None
744 744 and (self._repo._encodefilterpats
745 745 # if file data starts with '\1\n', empty metadata block is
746 746 # prepended, which adds 4 bytes to filelog.size().
747 747 or self.size() - 4 == fctx.size())
748 748 or self.size() == fctx.size()):
749 749 return self._filelog.cmp(self._filenode, fctx.data())
750 750
751 751 return True
752 752
753 753 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
754 754 """return the first ancestor of <srcrev> introducing <fnode>
755 755
756 756 If the linkrev of the file revision does not point to an ancestor of
757 757 srcrev, we'll walk down the ancestors until we find one introducing
758 758 this file revision.
759 759
760 760 :repo: a localrepository object (used to access changelog and manifest)
761 761 :path: the file path
762 762 :fnode: the nodeid of the file revision
763 763 :filelog: the filelog of this path
764 764 :srcrev: the changeset revision we search ancestors from
765 765 :inclusive: if true, the src revision will also be checked
766 766 """
767 767 repo = self._repo
768 768 cl = repo.unfiltered().changelog
769 769 ma = repo.manifest
770 770 # fetch the linkrev
771 771 fr = filelog.rev(fnode)
772 772 lkr = filelog.linkrev(fr)
773 773 # hack to reuse ancestor computation when searching for renames
774 774 memberanc = getattr(self, '_ancestrycontext', None)
775 775 iteranc = None
776 776 if srcrev is None:
777 777 # wctx case, used by workingfilectx during mergecopy
778 778 revs = [p.rev() for p in self._repo[None].parents()]
779 779 inclusive = True # we skipped the real (revless) source
780 780 else:
781 781 revs = [srcrev]
782 782 if memberanc is None:
783 783 memberanc = iteranc = cl.ancestors(revs, lkr,
784 784 inclusive=inclusive)
785 785 # check if this linkrev is an ancestor of srcrev
786 786 if lkr not in memberanc:
787 787 if iteranc is None:
788 788 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
789 789 for a in iteranc:
790 790 ac = cl.read(a) # get changeset data (we avoid object creation)
791 791 if path in ac[3]: # checking the 'files' field.
792 792 # The file has been touched, check if the content is
793 793 # similar to the one we search for.
794 794 if fnode == ma.readfast(ac[0]).get(path):
795 795 return a
796 796 # In theory, we should never get out of that loop without a result.
797 797 # But if the manifest uses a buggy file revision (not a child of the
798 798 # one it replaces) we could. Such a buggy situation will likely
799 799 # result in a crash somewhere else at some point.
800 800 return lkr
801 801
802 802 def introrev(self):
803 803 """return the rev of the changeset which introduced this file revision
804 804
805 805 This method is different from linkrev because it takes into account the
806 806 changeset the filectx was created from. It ensures the returned
807 807 revision is one of its ancestors. This prevents bugs from
808 808 'linkrev-shadowing' when a file revision is used by multiple
809 809 changesets.
810 810 """
811 811 lkr = self.linkrev()
812 812 attrs = vars(self)
813 813 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
814 814 if noctx or self.rev() == lkr:
815 815 return self.linkrev()
816 816 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
817 817 self.rev(), inclusive=True)
818 818
819 819 def _parentfilectx(self, path, fileid, filelog):
820 820 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
821 821 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
822 822 if '_changeid' in vars(self) or '_changectx' in vars(self):
823 823 # If self is associated with a changeset (probably explicitly
824 824 # fed), ensure the created filectx is associated with a
825 825 # changeset that is an ancestor of self.changectx.
826 826 # This lets us later use _adjustlinkrev to get a correct link.
827 827 fctx._descendantrev = self.rev()
828 828 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
829 829 elif '_descendantrev' in vars(self):
830 830 # Otherwise propagate _descendantrev if we have one associated.
831 831 fctx._descendantrev = self._descendantrev
832 832 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
833 833 return fctx
834 834
835 835 def parents(self):
836 836 _path = self._path
837 837 fl = self._filelog
838 838 parents = self._filelog.parents(self._filenode)
839 839 pl = [(_path, node, fl) for node in parents if node != nullid]
840 840
841 841 r = fl.renamed(self._filenode)
842 842 if r:
843 843 # - In the simple rename case, both parents are nullid, pl is empty.
844 844 # - In case of merge, only one of the parents is nullid and should
845 845 # be replaced with the rename information. This parent is -always-
846 846 # the first one.
847 847 #
848 848 # As nullid has always been filtered out in the previous list
849 849 # comprehension, inserting at 0 will always result in replacing the
850 850 # first nullid parent with the rename information.
851 851 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
852 852
853 853 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
854 854
855 855 def p1(self):
856 856 return self.parents()[0]
857 857
858 858 def p2(self):
859 859 p = self.parents()
860 860 if len(p) == 2:
861 861 return p[1]
862 862 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
863 863
864 864 def annotate(self, follow=False, linenumber=None, diffopts=None):
865 865 '''returns a list of tuples of (ctx, line) for each line
866 866 in the file, where ctx is the filectx of the node where
867 867 that line was last changed.
868 868 If the "linenumber" parameter is not None, this instead returns
869 869 tuples of ((ctx, linenumber), line) for each line.
870 870 In such tuples, linenumber is the line number at the line's first
871 871 appearance in the managed file.
872 872 To reduce annotation cost,
873 873 a fixed value (False) is returned as linenumber
874 874 if the "linenumber" parameter is False.'''
875 875
876 876 if linenumber is None:
877 877 def decorate(text, rev):
878 878 return ([rev] * len(text.splitlines()), text)
879 879 elif linenumber:
880 880 def decorate(text, rev):
881 881 size = len(text.splitlines())
882 882 return ([(rev, i) for i in xrange(1, size + 1)], text)
883 883 else:
884 884 def decorate(text, rev):
885 885 return ([(rev, False)] * len(text.splitlines()), text)
886 886
887 887 def pair(parent, child):
888 888 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
889 889 refine=True)
890 890 for (a1, a2, b1, b2), t in blocks:
891 891 # Changed blocks ('!') or blocks made only of blank lines ('~')
892 892 # belong to the child.
893 893 if t == '=':
894 894 child[0][b1:b2] = parent[0][a1:a2]
895 895 return child
896 896
897 897 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
898 898
899 899 def parents(f):
900 900 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
901 901 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
902 902 # from the topmost introrev (= srcrev) down to p.linkrev() if it
903 903 # isn't an ancestor of the srcrev.
904 904 f._changeid
905 905 pl = f.parents()
906 906
907 907 # Don't return renamed parents if we aren't following.
908 908 if not follow:
909 909 pl = [p for p in pl if p.path() == f.path()]
910 910
911 911 # renamed filectx won't have a filelog yet, so set it
912 912 # from the cache to save time
913 913 for p in pl:
914 914 if not '_filelog' in p.__dict__:
915 915 p._filelog = getlog(p.path())
916 916
917 917 return pl
918 918
919 919 # use linkrev to find the first changeset where self appeared
920 920 base = self
921 921 introrev = self.introrev()
922 922 if self.rev() != introrev:
923 923 base = self.filectx(self.filenode(), changeid=introrev)
924 924 if getattr(base, '_ancestrycontext', None) is None:
925 925 cl = self._repo.changelog
926 926 if introrev is None:
927 927 # wctx is not inclusive, but works because _ancestrycontext
928 928 # is used to test filelog revisions
929 929 ac = cl.ancestors([p.rev() for p in base.parents()],
930 930 inclusive=True)
931 931 else:
932 932 ac = cl.ancestors([introrev], inclusive=True)
933 933 base._ancestrycontext = ac
934 934
935 935 # This algorithm would prefer to be recursive, but Python is a
936 936 # bit recursion-hostile. Instead we do an iterative
937 937 # depth-first search.
938 938
939 939 visit = [base]
940 940 hist = {}
941 941 pcache = {}
942 942 needed = {base: 1}
943 943 while visit:
944 944 f = visit[-1]
945 945 pcached = f in pcache
946 946 if not pcached:
947 947 pcache[f] = parents(f)
948 948
949 949 ready = True
950 950 pl = pcache[f]
951 951 for p in pl:
952 952 if p not in hist:
953 953 ready = False
954 954 visit.append(p)
955 955 if not pcached:
956 956 needed[p] = needed.get(p, 0) + 1
957 957 if ready:
958 958 visit.pop()
959 959 reusable = f in hist
960 960 if reusable:
961 961 curr = hist[f]
962 962 else:
963 963 curr = decorate(f.data(), f)
964 964 for p in pl:
965 965 if not reusable:
966 966 curr = pair(hist[p], curr)
967 967 if needed[p] == 1:
968 968 del hist[p]
969 969 del needed[p]
970 970 else:
971 971 needed[p] -= 1
972 972
973 973 hist[f] = curr
974 974 pcache[f] = []
975 975
976 976 return zip(hist[base][0], hist[base][1].splitlines(True))
977 977
978 978 def ancestors(self, followfirst=False):
979 979 visit = {}
980 980 c = self
981 981 if followfirst:
982 982 cut = 1
983 983 else:
984 984 cut = None
985 985
986 986 while True:
987 987 for parent in c.parents()[:cut]:
988 988 visit[(parent.linkrev(), parent.filenode())] = parent
989 989 if not visit:
990 990 break
991 991 c = visit.pop(max(visit))
992 992 yield c
993 993
994 994 class filectx(basefilectx):
995 995 """A filecontext object makes access to data related to a particular
996 996 filerevision convenient."""
997 997 def __init__(self, repo, path, changeid=None, fileid=None,
998 998 filelog=None, changectx=None):
999 999 """changeid can be a changeset revision, node, or tag.
1000 1000 fileid can be a file revision or node."""
1001 1001 self._repo = repo
1002 1002 self._path = path
1003 1003
1004 1004 assert (changeid is not None
1005 1005 or fileid is not None
1006 1006 or changectx is not None), \
1007 1007 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1008 1008 % (changeid, fileid, changectx))
1009 1009
1010 1010 if filelog is not None:
1011 1011 self._filelog = filelog
1012 1012
1013 1013 if changeid is not None:
1014 1014 self._changeid = changeid
1015 1015 if changectx is not None:
1016 1016 self._changectx = changectx
1017 1017 if fileid is not None:
1018 1018 self._fileid = fileid
1019 1019
1020 1020 @propertycache
1021 1021 def _changectx(self):
1022 1022 try:
1023 1023 return changectx(self._repo, self._changeid)
1024 1024 except error.FilteredRepoLookupError:
1025 1025 # Linkrev may point to any revision in the repository. When the
1026 1026 # repository is filtered this may lead to `filectx` trying to build
1027 1027 # `changectx` for a filtered revision. In such a case we fall back to
1028 1028 # creating `changectx` on the unfiltered version of the repository.
1029 1029 # This fallback should not be an issue because `changectx` from
1030 1030 # `filectx` are not used in complex operations that care about
1031 1031 # filtering.
1032 1032 #
1033 1033 # This fallback is a cheap and dirty fix that prevents several
1034 1034 # crashes. It does not ensure the behavior is correct. However the
1035 1035 # behavior was not correct before filtering either, and "incorrect
1036 1036 # behavior" is seen as better than "crash".
1037 1037 #
1038 1038 # Linkrevs have several serious troubles with filtering that are
1039 1039 # complicated to solve. Proper handling of the issue here should be
1040 1040 # considered when solving the linkrev issues is on the table.
1041 1041 return changectx(self._repo.unfiltered(), self._changeid)
1042 1042
1043 1043 def filectx(self, fileid, changeid=None):
1044 1044 '''opens an arbitrary revision of the file without
1045 1045 opening a new filelog'''
1046 1046 return filectx(self._repo, self._path, fileid=fileid,
1047 1047 filelog=self._filelog, changeid=changeid)
1048 1048
1049 1049 def data(self):
1050 1050 try:
1051 1051 return self._filelog.read(self._filenode)
1052 1052 except error.CensoredNodeError:
1053 1053 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1054 1054 return ""
1055 1055 raise util.Abort(_("censored node: %s") % short(self._filenode),
1056 1056 hint=_("set censor.policy to ignore errors"))
1057 1057
1058 1058 def size(self):
1059 1059 return self._filelog.size(self._filerev)
1060 1060
1061 1061 def renamed(self):
1062 1062 """check if file was actually renamed in this changeset revision
1063 1063
1064 1064 If a rename is logged in the file revision, we report a copy for the
1065 1065 changeset only if the file revision's linkrev points back to the changeset
1066 1066 in question or both changeset parents contain different file revisions.
1067 1067 """
1068 1068
1069 1069 renamed = self._filelog.renamed(self._filenode)
1070 1070 if not renamed:
1071 1071 return renamed
1072 1072
1073 1073 if self.rev() == self.linkrev():
1074 1074 return renamed
1075 1075
1076 1076 name = self.path()
1077 1077 fnode = self._filenode
1078 1078 for p in self._changectx.parents():
1079 1079 try:
1080 1080 if fnode == p.filenode(name):
1081 1081 return None
1082 1082 except error.LookupError:
1083 1083 pass
1084 1084 return renamed
1085 1085
1086 1086 def children(self):
1087 1087 # hard for renames
1088 1088 c = self._filelog.children(self._filenode)
1089 1089 return [filectx(self._repo, self._path, fileid=x,
1090 1090 filelog=self._filelog) for x in c]
1091 1091
1092 1092 class committablectx(basectx):
1093 1093 """A committablectx object provides common functionality for a context that
1094 1094 wants the ability to commit, e.g. workingctx or memctx."""
1095 1095 def __init__(self, repo, text="", user=None, date=None, extra=None,
1096 1096 changes=None):
1097 1097 self._repo = repo
1098 1098 self._rev = None
1099 1099 self._node = None
1100 1100 self._text = text
1101 1101 if date:
1102 1102 self._date = util.parsedate(date)
1103 1103 if user:
1104 1104 self._user = user
1105 1105 if changes:
1106 1106 self._status = changes
1107 1107
1108 1108 self._extra = {}
1109 1109 if extra:
1110 1110 self._extra = extra.copy()
1111 1111 if 'branch' not in self._extra:
1112 1112 try:
1113 1113 branch = encoding.fromlocal(self._repo.dirstate.branch())
1114 1114 except UnicodeDecodeError:
1115 1115 raise util.Abort(_('branch name not in UTF-8!'))
1116 1116 self._extra['branch'] = branch
1117 1117 if self._extra['branch'] == '':
1118 1118 self._extra['branch'] = 'default'
1119 1119
1120 1120 def __str__(self):
1121 1121 return str(self._parents[0]) + "+"
1122 1122
1123 1123 def __nonzero__(self):
1124 1124 return True
1125 1125
1126 1126 def _buildflagfunc(self):
1127 1127 # Create a fallback function for getting file flags when the
1128 1128 # filesystem doesn't support them
1129 1129
1130 1130 copiesget = self._repo.dirstate.copies().get
1131 1131
1132 1132 if len(self._parents) < 2:
1133 1133 # when we have one parent, it's easy: copy from parent
1134 1134 man = self._parents[0].manifest()
1135 1135 def func(f):
1136 1136 f = copiesget(f, f)
1137 1137 return man.flags(f)
1138 1138 else:
1139 1139 # merges are tricky: we try to reconstruct the unstored
1140 1140 # result from the merge (issue1802)
1141 1141 p1, p2 = self._parents
1142 1142 pa = p1.ancestor(p2)
1143 1143 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1144 1144
1145 1145 def func(f):
1146 1146 f = copiesget(f, f) # may be wrong for merges with copies
1147 1147 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1148 1148 if fl1 == fl2:
1149 1149 return fl1
1150 1150 if fl1 == fla:
1151 1151 return fl2
1152 1152 if fl2 == fla:
1153 1153 return fl1
1154 1154 return '' # punt for conflicts
1155 1155
1156 1156 return func
1157 1157
1158 1158 @propertycache
1159 1159 def _flagfunc(self):
1160 1160 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1161 1161
1162 1162 @propertycache
1163 1163 def _manifest(self):
1164 1164 """generate a manifest corresponding to the values in self._status
1165 1165
1166 1166 This reuses the file nodeid from the parent, but appends an extra letter
1167 1167 when modified. Modified files get an extra 'm' while added files get
1168 1168 an extra 'a'. This is used by the manifest merge to see that files
1169 1169 are different and by the update logic to avoid deleting newly added files.
1170 1170 """
1171 1171
1172 1172 man1 = self._parents[0].manifest()
1173 1173 man = man1.copy()
1174 1174 if len(self._parents) > 1:
1175 1175 man2 = self.p2().manifest()
1176 1176 def getman(f):
1177 1177 if f in man1:
1178 1178 return man1
1179 1179 return man2
1180 1180 else:
1181 1181 getman = lambda f: man1
1182 1182
1183 1183 copied = self._repo.dirstate.copies()
1184 1184 ff = self._flagfunc
1185 1185 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1186 1186 for f in l:
1187 1187 orig = copied.get(f, f)
1188 1188 man[f] = getman(orig).get(orig, nullid) + i
1189 1189 try:
1190 1190 man.setflag(f, ff(f))
1191 1191 except OSError:
1192 1192 pass
1193 1193
1194 1194 for f in self._status.deleted + self._status.removed:
1195 1195 if f in man:
1196 1196 del man[f]
1197 1197
1198 1198 return man
1199 1199
1200 1200 @propertycache
1201 1201 def _status(self):
1202 1202 return self._repo.status()
1203 1203
1204 1204 @propertycache
1205 1205 def _user(self):
1206 1206 return self._repo.ui.username()
1207 1207
1208 1208 @propertycache
1209 1209 def _date(self):
1210 1210 return util.makedate()
1211 1211
1212 1212 def subrev(self, subpath):
1213 1213 return None
1214 1214
1215 1215 def manifestnode(self):
1216 1216 return None
1217 1217 def user(self):
1218 1218 return self._user or self._repo.ui.username()
1219 1219 def date(self):
1220 1220 return self._date
1221 1221 def description(self):
1222 1222 return self._text
1223 1223 def files(self):
1224 1224 return sorted(self._status.modified + self._status.added +
1225 1225 self._status.removed)
1226 1226
1227 1227 def modified(self):
1228 1228 return self._status.modified
1229 1229 def added(self):
1230 1230 return self._status.added
1231 1231 def removed(self):
1232 1232 return self._status.removed
1233 1233 def deleted(self):
1234 1234 return self._status.deleted
1235 1235 def branch(self):
1236 1236 return encoding.tolocal(self._extra['branch'])
1237 1237 def closesbranch(self):
1238 1238 return 'close' in self._extra
1239 1239 def extra(self):
1240 1240 return self._extra
1241 1241
1242 1242 def tags(self):
1243 1243 t = []
1244 1244 for p in self.parents():
1245 1245 t.extend(p.tags())
1246 1246 return t
1247 1247
1248 1248 def bookmarks(self):
1249 1249 b = []
1250 1250 for p in self.parents():
1251 1251 b.extend(p.bookmarks())
1252 1252 return b
1253 1253
1254 1254 def phase(self):
1255 1255 phase = phases.draft # default phase to draft
1256 1256 for p in self.parents():
1257 1257 phase = max(phase, p.phase())
1258 1258 return phase
1259 1259
1260 1260 def hidden(self):
1261 1261 return False
1262 1262
1263 1263 def children(self):
1264 1264 return []
1265 1265
1266 1266 def flags(self, path):
1267 1267 if '_manifest' in self.__dict__:
1268 1268 try:
1269 1269 return self._manifest.flags(path)
1270 1270 except KeyError:
1271 1271 return ''
1272 1272
1273 1273 try:
1274 1274 return self._flagfunc(path)
1275 1275 except OSError:
1276 1276 return ''
1277 1277
1278 1278 def ancestor(self, c2):
1279 1279 """return the "best" ancestor context of self and c2"""
1280 1280 return self._parents[0].ancestor(c2) # punt on two parents for now
1281 1281
1282 1282 def walk(self, match):
1283 1283 '''Generates matching file names.'''
1284 1284 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1285 1285 True, False))
1286 1286
1287 1287 def matches(self, match):
1288 1288 return sorted(self._repo.dirstate.matches(match))
1289 1289
1290 1290 def ancestors(self):
1291 1291 for p in self._parents:
1292 1292 yield p
1293 1293 for a in self._repo.changelog.ancestors(
1294 1294 [p.rev() for p in self._parents]):
1295 1295 yield changectx(self._repo, a)
1296 1296
1297 1297 def markcommitted(self, node):
1298 1298 """Perform post-commit cleanup necessary after committing this ctx
1299 1299
1300 1300 Specifically, this updates backing stores this working context
1301 1301 wraps to reflect the fact that the changes reflected by this
1302 1302 workingctx have been committed. For example, it marks
1303 1303 modified and added files as normal in the dirstate.
1304 1304
1305 1305 """
1306 1306
1307 1307 self._repo.dirstate.beginparentchange()
1308 1308 for f in self.modified() + self.added():
1309 1309 self._repo.dirstate.normal(f)
1310 1310 for f in self.removed():
1311 1311 self._repo.dirstate.drop(f)
1312 1312 self._repo.dirstate.setparents(node)
1313 1313 self._repo.dirstate.endparentchange()
1314 1314
1315 1315 class workingctx(committablectx):
1316 1316 """A workingctx object makes access to data related to
1317 1317 the current working directory convenient.
1318 1318 date - any valid date string or (unixtime, offset), or None.
1319 1319 user - username string, or None.
1320 1320 extra - a dictionary of extra values, or None.
1321 1321 changes - a list of file lists as returned by localrepo.status()
1322 1322 or None to use the repository status.
1323 1323 """
1324 1324 def __init__(self, repo, text="", user=None, date=None, extra=None,
1325 1325 changes=None):
1326 1326 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1327 1327
1328 1328 def __iter__(self):
1329 1329 d = self._repo.dirstate
1330 1330 for f in d:
1331 1331 if d[f] != 'r':
1332 1332 yield f
1333 1333
1334 1334 def __contains__(self, key):
1335 1335 return self._repo.dirstate[key] not in "?r"
1336 1336
1337 def hex(self):
1338 return "ff" * 20
1339
1337 1340 @propertycache
1338 1341 def _parents(self):
1339 1342 p = self._repo.dirstate.parents()
1340 1343 if p[1] == nullid:
1341 1344 p = p[:-1]
1342 1345 return [changectx(self._repo, x) for x in p]
1343 1346
1344 1347 def filectx(self, path, filelog=None):
1345 1348 """get a file context from the working directory"""
1346 1349 return workingfilectx(self._repo, path, workingctx=self,
1347 1350 filelog=filelog)
1348 1351
1349 1352 def dirty(self, missing=False, merge=True, branch=True):
1350 1353 "check whether a working directory is modified"
1351 1354 # check subrepos first
1352 1355 for s in sorted(self.substate):
1353 1356 if self.sub(s).dirty():
1354 1357 return True
1355 1358 # check current working dir
1356 1359 return ((merge and self.p2()) or
1357 1360 (branch and self.branch() != self.p1().branch()) or
1358 1361 self.modified() or self.added() or self.removed() or
1359 1362 (missing and self.deleted()))
1360 1363
1361 1364 def add(self, list, prefix=""):
1362 1365 join = lambda f: os.path.join(prefix, f)
1363 1366 wlock = self._repo.wlock()
1364 1367 ui, ds = self._repo.ui, self._repo.dirstate
1365 1368 try:
1366 1369 rejected = []
1367 1370 lstat = self._repo.wvfs.lstat
1368 1371 for f in list:
1369 1372 scmutil.checkportable(ui, join(f))
1370 1373 try:
1371 1374 st = lstat(f)
1372 1375 except OSError:
1373 1376 ui.warn(_("%s does not exist!\n") % join(f))
1374 1377 rejected.append(f)
1375 1378 continue
1376 1379 if st.st_size > 10000000:
1377 1380 ui.warn(_("%s: up to %d MB of RAM may be required "
1378 1381 "to manage this file\n"
1379 1382 "(use 'hg revert %s' to cancel the "
1380 1383 "pending addition)\n")
1381 1384 % (f, 3 * st.st_size // 1000000, join(f)))
1382 1385 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1383 1386 ui.warn(_("%s not added: only files and symlinks "
1384 1387 "supported currently\n") % join(f))
1385 1388 rejected.append(f)
1386 1389 elif ds[f] in 'amn':
1387 1390 ui.warn(_("%s already tracked!\n") % join(f))
1388 1391 elif ds[f] == 'r':
1389 1392 ds.normallookup(f)
1390 1393 else:
1391 1394 ds.add(f)
1392 1395 return rejected
1393 1396 finally:
1394 1397 wlock.release()
1395 1398
1396 1399 def forget(self, files, prefix=""):
1397 1400 join = lambda f: os.path.join(prefix, f)
1398 1401 wlock = self._repo.wlock()
1399 1402 try:
1400 1403 rejected = []
1401 1404 for f in files:
1402 1405 if f not in self._repo.dirstate:
1403 1406 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1404 1407 rejected.append(f)
1405 1408 elif self._repo.dirstate[f] != 'a':
1406 1409 self._repo.dirstate.remove(f)
1407 1410 else:
1408 1411 self._repo.dirstate.drop(f)
1409 1412 return rejected
1410 1413 finally:
1411 1414 wlock.release()
1412 1415
1413 1416 def undelete(self, list):
1414 1417 pctxs = self.parents()
1415 1418 wlock = self._repo.wlock()
1416 1419 try:
1417 1420 for f in list:
1418 1421 if self._repo.dirstate[f] != 'r':
1419 1422 self._repo.ui.warn(_("%s not removed!\n") % f)
1420 1423 else:
1421 1424 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1422 1425 t = fctx.data()
1423 1426 self._repo.wwrite(f, t, fctx.flags())
1424 1427 self._repo.dirstate.normal(f)
1425 1428 finally:
1426 1429 wlock.release()
1427 1430
1428 1431 def copy(self, source, dest):
1429 1432 try:
1430 1433 st = self._repo.wvfs.lstat(dest)
1431 1434 except OSError, err:
1432 1435 if err.errno != errno.ENOENT:
1433 1436 raise
1434 1437 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1435 1438 return
1436 1439 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1437 1440 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1438 1441 "symbolic link\n") % dest)
1439 1442 else:
1440 1443 wlock = self._repo.wlock()
1441 1444 try:
1442 1445 if self._repo.dirstate[dest] in '?':
1443 1446 self._repo.dirstate.add(dest)
1444 1447 elif self._repo.dirstate[dest] in 'r':
1445 1448 self._repo.dirstate.normallookup(dest)
1446 1449 self._repo.dirstate.copy(source, dest)
1447 1450 finally:
1448 1451 wlock.release()
1449 1452
1450 1453 def match(self, pats=[], include=None, exclude=None, default='glob',
1451 1454 listsubrepos=False, badfn=None):
1452 1455 r = self._repo
1453 1456
1454 1457 # Only a case insensitive filesystem needs magic to translate user input
1455 1458 # to actual case in the filesystem.
1456 1459 if not util.checkcase(r.root):
1457 1460 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1458 1461 exclude, default, r.auditor, self,
1459 1462 listsubrepos=listsubrepos,
1460 1463 badfn=badfn)
1461 1464 return matchmod.match(r.root, r.getcwd(), pats,
1462 1465 include, exclude, default,
1463 1466 auditor=r.auditor, ctx=self,
1464 1467 listsubrepos=listsubrepos, badfn=badfn)
1465 1468
1466 1469 def _filtersuspectsymlink(self, files):
1467 1470 if not files or self._repo.dirstate._checklink:
1468 1471 return files
1469 1472
1470 1473 # Symlink placeholders may get non-symlink-like contents
1471 1474 # via user error or dereferencing by NFS or Samba servers,
1472 1475 # so we filter out any placeholders that don't look like a
1473 1476 # symlink
1474 1477 sane = []
1475 1478 for f in files:
1476 1479 if self.flags(f) == 'l':
1477 1480 d = self[f].data()
1478 1481 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1479 1482 self._repo.ui.debug('ignoring suspect symlink placeholder'
1480 1483 ' "%s"\n' % f)
1481 1484 continue
1482 1485 sane.append(f)
1483 1486 return sane
1484 1487
1485 1488 def _checklookup(self, files):
1486 1489 # check for any possibly clean files
1487 1490 if not files:
1488 1491 return [], []
1489 1492
1490 1493 modified = []
1491 1494 fixup = []
1492 1495 pctx = self._parents[0]
1493 1496 # do a full compare of any files that might have changed
1494 1497 for f in sorted(files):
1495 1498 if (f not in pctx or self.flags(f) != pctx.flags(f)
1496 1499 or pctx[f].cmp(self[f])):
1497 1500 modified.append(f)
1498 1501 else:
1499 1502 fixup.append(f)
1500 1503
1501 1504 # update dirstate for files that are actually clean
1502 1505 if fixup:
1503 1506 try:
1504 1507 # updating the dirstate is optional
1505 1508 # so we don't wait on the lock
1506 1509 # wlock can invalidate the dirstate, so cache normal _after_
1507 1510 # taking the lock
1508 1511 wlock = self._repo.wlock(False)
1509 1512 normal = self._repo.dirstate.normal
1510 1513 try:
1511 1514 for f in fixup:
1512 1515 normal(f)
1513 1516 finally:
1514 1517 wlock.release()
1515 1518 except error.LockError:
1516 1519 pass
1517 1520 return modified, fixup
1518 1521
1519 1522 def _manifestmatches(self, match, s):
1520 1523 """Slow path for workingctx
1521 1524
1522 1525 The fast path is when we compare the working directory to its parent
1523 1526 which means this function is comparing with a non-parent; therefore we
1524 1527 need to build a manifest and return what matches.
1525 1528 """
1526 1529 mf = self._repo['.']._manifestmatches(match, s)
1527 1530 for f in s.modified + s.added:
1528 1531 mf[f] = _newnode
1529 1532 mf.setflag(f, self.flags(f))
1530 1533 for f in s.removed:
1531 1534 if f in mf:
1532 1535 del mf[f]
1533 1536 return mf
1534 1537
1535 1538 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1536 1539 unknown=False):
1537 1540 '''Gets the status from the dirstate -- internal use only.'''
1538 1541 listignored, listclean, listunknown = ignored, clean, unknown
1539 1542 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1540 1543 subrepos = []
1541 1544 if '.hgsub' in self:
1542 1545 subrepos = sorted(self.substate)
1543 1546 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1544 1547 listclean, listunknown)
1545 1548
1546 1549 # check for any possibly clean files
1547 1550 if cmp:
1548 1551 modified2, fixup = self._checklookup(cmp)
1549 1552 s.modified.extend(modified2)
1550 1553
1551 1554 # update dirstate for files that are actually clean
1552 1555 if fixup and listclean:
1553 1556 s.clean.extend(fixup)
1554 1557
1555 1558 if match.always():
1556 1559 # cache for performance
1557 1560 if s.unknown or s.ignored or s.clean:
1558 1561 # "_status" is cached with list*=False in the normal route
1559 1562 self._status = scmutil.status(s.modified, s.added, s.removed,
1560 1563 s.deleted, [], [], [])
1561 1564 else:
1562 1565 self._status = s
1563 1566
1564 1567 return s
1565 1568
1566 1569 def _buildstatus(self, other, s, match, listignored, listclean,
1567 1570 listunknown):
1568 1571 """build a status with respect to another context
1569 1572
1570 1573 This includes logic for maintaining the fast path of status when
1571 1574 comparing the working directory against its parent, which is to skip
1572 1575 building a new manifest when self (the working directory) is compared
1573 1576 against its parent (repo['.']).
1574 1577 """
1575 1578 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1576 1579 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1577 1580 # might have accidentally ended up with the entire contents of the file
1578 1581 # they are supposed to be linking to.
1579 1582 s.modified[:] = self._filtersuspectsymlink(s.modified)
1580 1583 if other != self._repo['.']:
1581 1584 s = super(workingctx, self)._buildstatus(other, s, match,
1582 1585 listignored, listclean,
1583 1586 listunknown)
1584 1587 return s
1585 1588
1586 1589 def _matchstatus(self, other, match):
1587 1590 """override the match method with a filter for directory patterns
1588 1591
1589 1592 We use inheritance to customize the match.bad method only in cases of
1590 1593 workingctx, since the filter is only needed when the working directory
1591 1594 is compared against a changeset other than its parent.
1592 1595 
1593 1596 If we are comparing against the working directory's parent, then we
1594 1597 just use the default match object sent to us.
1595 1598 """
1596 1599 superself = super(workingctx, self)
1597 1600 match = superself._matchstatus(other, match)
1598 1601 if other != self._repo['.']:
1599 1602 def bad(f, msg):
1600 1603 # 'f' may be a directory pattern from 'match.files()',
1601 1604 # so 'f not in other' is not enough
1602 1605 if f not in other and not other.hasdir(f):
1603 1606 self._repo.ui.warn('%s: %s\n' %
1604 1607 (self._repo.dirstate.pathto(f), msg))
1605 1608 match.bad = bad
1606 1609 return match
1607 1610
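# Editor's sketch (not part of context.py): a rough illustration of the
# status fast/slow paths described in the docstrings above.  "repo" is
# assumed to be an open localrepo object and "otherrev" any changeset
# identifier; the status() entry point itself is defined elsewhere in
# this module.
def _example_workingctx_status(repo, otherrev):
    wctx = repo[None]                    # workingctx for the working directory
    fast = wctx.status()                 # vs. repo['.']: pure dirstate walk
    slow = wctx.status(repo[otherrev])   # vs. a non-parent: a manifest is built
    return fast.modified, slow.modified
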
1608 1611 class committablefilectx(basefilectx):
1609 1612 """A committablefilectx provides common functionality for a file context
1610 1613 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1611 1614 def __init__(self, repo, path, filelog=None, ctx=None):
1612 1615 self._repo = repo
1613 1616 self._path = path
1614 1617 self._changeid = None
1615 1618 self._filerev = self._filenode = None
1616 1619
1617 1620 if filelog is not None:
1618 1621 self._filelog = filelog
1619 1622 if ctx:
1620 1623 self._changectx = ctx
1621 1624
1622 1625 def __nonzero__(self):
1623 1626 return True
1624 1627
1625 1628 def linkrev(self):
1626 1629 # linked to self._changectx whether or not the file is modified
1627 1630 return self.rev()
1628 1631
1629 1632 def parents(self):
1630 1633 '''return parent filectxs, following copies if necessary'''
1631 1634 def filenode(ctx, path):
1632 1635 return ctx._manifest.get(path, nullid)
1633 1636
1634 1637 path = self._path
1635 1638 fl = self._filelog
1636 1639 pcl = self._changectx._parents
1637 1640 renamed = self.renamed()
1638 1641
1639 1642 if renamed:
1640 1643 pl = [renamed + (None,)]
1641 1644 else:
1642 1645 pl = [(path, filenode(pcl[0], path), fl)]
1643 1646
1644 1647 for pc in pcl[1:]:
1645 1648 pl.append((path, filenode(pc, path), fl))
1646 1649
1647 1650 return [self._parentfilectx(p, fileid=n, filelog=l)
1648 1651 for p, n, l in pl if n != nullid]
1649 1652
1650 1653 def children(self):
1651 1654 return []
1652 1655
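# Editor's sketch (not part of context.py): parents() above follows a copy
# recorded in the dirstate.  This assumes 'hg copy a b' was run beforehand
# in the working directory of the assumed localrepo "repo".
def _example_copied_parents(repo):
    fctx = repo[None]['b']                     # committable (working) filectx
    return [p.path() for p in fctx.parents()]  # -> ['a'] for a fresh copy
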
1653 1656 class workingfilectx(committablefilectx):
1654 1657 """A workingfilectx object makes access to data related to a particular
1655 1658 file in the working directory convenient."""
1656 1659 def __init__(self, repo, path, filelog=None, workingctx=None):
1657 1660 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1658 1661
1659 1662 @propertycache
1660 1663 def _changectx(self):
1661 1664 return workingctx(self._repo)
1662 1665
1663 1666 def data(self):
1664 1667 return self._repo.wread(self._path)
1665 1668 def renamed(self):
1666 1669 rp = self._repo.dirstate.copied(self._path)
1667 1670 if not rp:
1668 1671 return None
1669 1672 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1670 1673
1671 1674 def size(self):
1672 1675 return self._repo.wvfs.lstat(self._path).st_size
1673 1676 def date(self):
1674 1677 t, tz = self._changectx.date()
1675 1678 try:
1676 1679 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1677 1680 except OSError, err:
1678 1681 if err.errno != errno.ENOENT:
1679 1682 raise
1680 1683 return (t, tz)
1681 1684
1682 1685 def cmp(self, fctx):
1683 1686 """compare with other file context
1684 1687
1685 1688 returns True if different than fctx.
1686 1689 """
1687 1690 # fctx should be a filectx (not a workingfilectx)
1688 1691 # invert comparison to reuse the same code path
1689 1692 return fctx.cmp(self)
1690 1693
1691 1694 def remove(self, ignoremissing=False):
1692 1695 """wraps unlink for a repo's working directory"""
1693 1696 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1694 1697
1695 1698 def write(self, data, flags):
1696 1699 """wraps repo.wwrite"""
1697 1700 self._repo.wwrite(self._path, data, flags)
1698 1701
1699 1702 class workingcommitctx(workingctx):
1700 1703 """A workingcommitctx object makes access to data related to
1701 1704 the revision being committed convenient.
1702 1705
1703 1706 This hides changes in the working directory, if they aren't
1704 1707 committed in this context.
1705 1708 """
1706 1709 def __init__(self, repo, changes,
1707 1710 text="", user=None, date=None, extra=None):
1708 1711 super(workingctx, self).__init__(repo, text, user, date, extra,
1709 1712 changes)
1710 1713
1711 1714 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1712 1715 unknown=False):
1713 1716 """Return matched files only in ``self._status``
1714 1717
1715 1718 Uncommitted files appear "clean" via this context, even if
1716 1719 they aren't actually so in the working directory.
1717 1720 """
1718 1721 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1719 1722 if clean:
1720 1723 clean = [f for f in self._manifest if f not in self._changedset]
1721 1724 else:
1722 1725 clean = []
1723 1726 return scmutil.status([f for f in self._status.modified if match(f)],
1724 1727 [f for f in self._status.added if match(f)],
1725 1728 [f for f in self._status.removed if match(f)],
1726 1729 [], [], [], clean)
1727 1730
1728 1731 @propertycache
1729 1732 def _changedset(self):
1730 1733 """Return the set of files changed in this context
1731 1734 """
1732 1735 changed = set(self._status.modified)
1733 1736 changed.update(self._status.added)
1734 1737 changed.update(self._status.removed)
1735 1738 return changed
1736 1739
1737 1740 class memctx(committablectx):
1738 1741 """Use memctx to perform in-memory commits via localrepo.commitctx().
1739 1742
1740 1743 Revision information is supplied at initialization time, while
1741 1744 related file data is made available through a callback
1742 1745 mechanism. 'repo' is the current localrepo, 'parents' is a
1743 1746 sequence of two parent revisions identifiers (pass None for every
1744 1747 missing parent), 'text' is the commit message and 'files' lists
1745 1748 names of files touched by the revision (normalized and relative to
1746 1749 repository root).
1747 1750
1748 1751 filectxfn(repo, memctx, path) is a callable receiving the
1749 1752 repository, the current memctx object and the normalized path of
1750 1753 requested file, relative to repository root. It is fired by the
1751 1754 commit function for every file in 'files', but calls order is
1752 1755 undefined. If the file is available in the revision being committed
1753 1756 (updated or added), filectxfn returns a memfilectx object; if the
1754 1757 file was removed, filectxfn returns None (Mercurial <= 3.1 expected
1755 1758 an IOError to be raised instead, see _returnnoneformissingfiles
1756 1759 below). Moved files are represented by marking the source file
1757 1760 removed and the new file added with copy information (see memfilectx).
1758 1761
1759 1762 'user' is the committer name and defaults to the current
1760 1763 repository username, 'date' is the commit date in any format
1761 1764 supported by util.parsedate() and defaults to the current date,
1762 1765 and 'extra' is a dictionary of metadata or is left empty.
1763 1766 """
1764 1767
1765 1768 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1766 1769 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1767 1770 # this field to determine what to do in filectxfn.
1768 1771 _returnnoneformissingfiles = True
1769 1772
1770 1773 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1771 1774 date=None, extra=None, editor=False):
1772 1775 super(memctx, self).__init__(repo, text, user, date, extra)
1773 1776 self._rev = None
1774 1777 self._node = None
1775 1778 parents = [(p or nullid) for p in parents]
1776 1779 p1, p2 = parents
1777 1780 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1778 1781 files = sorted(set(files))
1779 1782 self._files = files
1780 1783 self.substate = {}
1781 1784
1782 1785 # if store is not callable, wrap it in a function
1783 1786 if not callable(filectxfn):
1784 1787 def getfilectx(repo, memctx, path):
1785 1788 fctx = filectxfn[path]
1786 1789 # this is weird but apparently we only keep track of one parent
1787 1790 # (why not only store that instead of a tuple?)
1788 1791 copied = fctx.renamed()
1789 1792 if copied:
1790 1793 copied = copied[0]
1791 1794 return memfilectx(repo, path, fctx.data(),
1792 1795 islink=fctx.islink(), isexec=fctx.isexec(),
1793 1796 copied=copied, memctx=memctx)
1794 1797 self._filectxfn = getfilectx
1795 1798 else:
1796 1799 # "util.cachefunc" reduces invocation of possibly expensive
1797 1800 # "filectxfn" for performance (e.g. converting from another VCS)
1798 1801 self._filectxfn = util.cachefunc(filectxfn)
1799 1802
1800 1803 if extra:
1801 1804 self._extra = extra.copy()
1802 1805 else:
1803 1806 self._extra = {}
1804 1807
1805 1808 if self._extra.get('branch', '') == '':
1806 1809 self._extra['branch'] = 'default'
1807 1810
1808 1811 if editor:
1809 1812 self._text = editor(self._repo, self, [])
1810 1813 self._repo.savecommitmessage(self._text)
1811 1814
1812 1815 def filectx(self, path, filelog=None):
1813 1816 """get a file context from the working directory
1814 1817
1815 1818 Returns None if file doesn't exist and should be removed."""
1816 1819 return self._filectxfn(self._repo, self, path)
1817 1820
1818 1821 def commit(self):
1819 1822 """commit context to the repo"""
1820 1823 return self._repo.commitctx(self)
1821 1824
1822 1825 @propertycache
1823 1826 def _manifest(self):
1824 1827 """generate a manifest based on the return values of filectxfn"""
1825 1828
1826 1829 # keep this simple for now; just worry about p1
1827 1830 pctx = self._parents[0]
1828 1831 man = pctx.manifest().copy()
1829 1832
1830 1833 for f in self._status.modified:
1831 1834 p1node = nullid
1832 1835 p2node = nullid
1833 1836 p = pctx[f].parents() # if file isn't in pctx, check p2?
1834 1837 if len(p) > 0:
1835 1838 p1node = p[0].node()
1836 1839 if len(p) > 1:
1837 1840 p2node = p[1].node()
1838 1841 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1839 1842
1840 1843 for f in self._status.added:
1841 1844 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1842 1845
1843 1846 for f in self._status.removed:
1844 1847 if f in man:
1845 1848 del man[f]
1846 1849
1847 1850 return man
1848 1851
1849 1852 @propertycache
1850 1853 def _status(self):
1851 1854 """Calculate exact status from ``files`` specified at construction
1852 1855 """
1853 1856 man1 = self.p1().manifest()
1854 1857 p2 = self._parents[1]
1855 1858 # "1 < len(self._parents)" can't be used for checking
1856 1859 # existence of the 2nd parent, because "memctx._parents" is
1857 1860 # explicitly initialized by the list, of which length is 2.
1858 1861 if p2.node() != nullid:
1859 1862 man2 = p2.manifest()
1860 1863 managing = lambda f: f in man1 or f in man2
1861 1864 else:
1862 1865 managing = lambda f: f in man1
1863 1866
1864 1867 modified, added, removed = [], [], []
1865 1868 for f in self._files:
1866 1869 if not managing(f):
1867 1870 added.append(f)
1868 1871 elif self[f]:
1869 1872 modified.append(f)
1870 1873 else:
1871 1874 removed.append(f)
1872 1875
1873 1876 return scmutil.status(modified, added, removed, [], [], [], [])
1874 1877
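# Editor's sketch (not part of context.py): a minimal in-memory commit using
# the filectxfn protocol documented in the memctx docstring above.  "repo" is
# an assumed open localrepo; the file name and contents are made up.
def _example_memctx_commit(repo):
    def getfilectx(repo, mctx, path):
        # called by the commit machinery for every entry in 'files'
        return memfilectx(repo, path, 'hello from memctx\n', memctx=mctx)
    mctx = memctx(repo, [repo['.'].node(), None],
                  'example: commit without touching the working directory',
                  ['hello.txt'], getfilectx,
                  user='An Editor <editor@example.invalid>')
    return mctx.commit()       # hands the new context to localrepo.commitctx()
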
1875 1878 class memfilectx(committablefilectx):
1876 1879 """memfilectx represents an in-memory file to commit.
1877 1880
1878 1881 See memctx and committablefilectx for more details.
1879 1882 """
1880 1883 def __init__(self, repo, path, data, islink=False,
1881 1884 isexec=False, copied=None, memctx=None):
1882 1885 """
1883 1886 path is the normalized file path relative to repository root.
1884 1887 data is the file content as a string.
1885 1888 islink is True if the file is a symbolic link.
1886 1889 isexec is True if the file is executable.
1887 1890 copied is the source file path if current file was copied in the
1888 1891 revision being committed, or None."""
1889 1892 super(memfilectx, self).__init__(repo, path, None, memctx)
1890 1893 self._data = data
1891 1894 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1892 1895 self._copied = None
1893 1896 if copied:
1894 1897 self._copied = (copied, nullid)
1895 1898
1896 1899 def data(self):
1897 1900 return self._data
1898 1901 def size(self):
1899 1902 return len(self.data())
1900 1903 def flags(self):
1901 1904 return self._flags
1902 1905 def renamed(self):
1903 1906 return self._copied
1904 1907
1905 1908 def remove(self, ignoremissing=False):
1906 1909 """wraps unlink for a repo's working directory"""
1907 1910 # need to figure out what to do here
1908 1911 del self._changectx[self._path]
1909 1912
1910 1913 def write(self, data, flags):
1911 1914 """wraps repo.wwrite"""
1912 1915 self._data = data
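
# Editor's sketch (not part of context.py): the non-callable form of
# filectxfn accepted by memctx.__init__ above -- a mapping from path to
# memfilectx that memctx wraps into a callback itself.  "repo" is an assumed
# open localrepo; paths and contents are made up.
def _example_memctx_from_mapping(repo):
    store = {'a.txt': memfilectx(repo, 'a.txt', 'contents of a\n'),
             'b.sh': memfilectx(repo, 'b.sh', '#!/bin/sh\n', isexec=True)}
    mctx = memctx(repo, [repo['.'].node(), None], 'commit built from a dict',
                  sorted(store), store)
    return mctx.commit()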