context: add an optional constructor parameter for a match.bad() override...
Matt Harbison
r25465:f472228a default
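For illustration only (not part of this patch): a minimal sketch of how a caller might pass the new badfn argument to ctx.match() instead of overwriting match.bad after construction. The helper name, the repo object, and the warning text are assumptions made for this example.

def walkrev(repo, rev, pats):
    # hypothetical helper, for illustration only
    ctx = repo[rev]

    def badfn(fn, msg):
        # called by the matcher for paths it cannot match
        repo.ui.warn("%s: %s\n" % (fn, msg))

    m = ctx.match(pats, badfn=badfn)
    return list(ctx.walk(m))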
@@ -1,1911 +1,1912 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, short, hex, bin
9 9 from i18n import _
10 10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 11 import match as matchmod
12 12 import os, errno, stat
13 13 import obsolete as obsmod
14 14 import repoview
15 15 import fileset
16 16 import revlog
17 17
18 18 propertycache = util.propertycache
19 19
20 20 # Phony node value to stand in for new files in some uses of
21 21 # manifests. Manifests support 21-byte hashes for nodes which are
22 22 # dirty in the working copy.
23 23 _newnode = '!' * 21
24 24
25 25 class basectx(object):
26 26 """A basectx object represents the common logic for its children:
27 27 changectx: read-only context that is already present in the repo,
28 28 workingctx: a context that represents the working directory and can
29 29 be committed,
30 30 memctx: a context that represents changes in-memory and can also
31 31 be committed."""
32 32 def __new__(cls, repo, changeid='', *args, **kwargs):
33 33 if isinstance(changeid, basectx):
34 34 return changeid
35 35
36 36 o = super(basectx, cls).__new__(cls)
37 37
38 38 o._repo = repo
39 39 o._rev = nullrev
40 40 o._node = nullid
41 41
42 42 return o
43 43
44 44 def __str__(self):
45 45 return short(self.node())
46 46
47 47 def __int__(self):
48 48 return self.rev()
49 49
50 50 def __repr__(self):
51 51 return "<%s %s>" % (type(self).__name__, str(self))
52 52
53 53 def __eq__(self, other):
54 54 try:
55 55 return type(self) == type(other) and self._rev == other._rev
56 56 except AttributeError:
57 57 return False
58 58
59 59 def __ne__(self, other):
60 60 return not (self == other)
61 61
62 62 def __contains__(self, key):
63 63 return key in self._manifest
64 64
65 65 def __getitem__(self, key):
66 66 return self.filectx(key)
67 67
68 68 def __iter__(self):
69 69 return iter(self._manifest)
70 70
71 71 def _manifestmatches(self, match, s):
72 72 """generate a new manifest filtered by the match argument
73 73
74 74 This method is for internal use only and mainly exists to provide an
75 75 object oriented way for other contexts to customize the manifest
76 76 generation.
77 77 """
78 78 return self.manifest().matches(match)
79 79
80 80 def _matchstatus(self, other, match):
81 81 """return match.always if match is none
82 82
83 83 This internal method provides a way for child objects to override the
84 84 match operator.
85 85 """
86 86 return match or matchmod.always(self._repo.root, self._repo.getcwd())
87 87
88 88 def _buildstatus(self, other, s, match, listignored, listclean,
89 89 listunknown):
90 90 """build a status with respect to another context"""
91 91 # Load earliest manifest first for caching reasons. More specifically,
92 92 # if you have revisions 1000 and 1001, 1001 is probably stored as a
93 93 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
94 94 # 1000 and cache it so that when you read 1001, we just need to apply a
95 95 # delta to what's in the cache. So that's one full reconstruction + one
96 96 # delta application.
97 97 if self.rev() is not None and self.rev() < other.rev():
98 98 self.manifest()
99 99 mf1 = other._manifestmatches(match, s)
100 100 mf2 = self._manifestmatches(match, s)
101 101
102 102 modified, added = [], []
103 103 removed = []
104 104 clean = []
105 105 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
106 106 deletedset = set(deleted)
107 107 d = mf1.diff(mf2, clean=listclean)
108 108 for fn, value in d.iteritems():
109 109 if fn in deletedset:
110 110 continue
111 111 if value is None:
112 112 clean.append(fn)
113 113 continue
114 114 (node1, flag1), (node2, flag2) = value
115 115 if node1 is None:
116 116 added.append(fn)
117 117 elif node2 is None:
118 118 removed.append(fn)
119 119 elif node2 != _newnode:
120 120 # The file was not a new file in mf2, so an entry
121 121 # from diff is really a difference.
122 122 modified.append(fn)
123 123 elif self[fn].cmp(other[fn]):
124 124 # node2 was newnode, but the working file doesn't
125 125 # match the one in mf1.
126 126 modified.append(fn)
127 127 else:
128 128 clean.append(fn)
129 129
130 130 if removed:
131 131 # need to filter files if they are already reported as removed
132 132 unknown = [fn for fn in unknown if fn not in mf1]
133 133 ignored = [fn for fn in ignored if fn not in mf1]
134 134 # if they're deleted, don't report them as removed
135 135 removed = [fn for fn in removed if fn not in deletedset]
136 136
137 137 return scmutil.status(modified, added, removed, deleted, unknown,
138 138 ignored, clean)
139 139
140 140 @propertycache
141 141 def substate(self):
142 142 return subrepo.state(self, self._repo.ui)
143 143
144 144 def subrev(self, subpath):
145 145 return self.substate[subpath][1]
146 146
147 147 def rev(self):
148 148 return self._rev
149 149 def node(self):
150 150 return self._node
151 151 def hex(self):
152 152 return hex(self.node())
153 153 def manifest(self):
154 154 return self._manifest
155 155 def repo(self):
156 156 return self._repo
157 157 def phasestr(self):
158 158 return phases.phasenames[self.phase()]
159 159 def mutable(self):
160 160 return self.phase() > phases.public
161 161
162 162 def getfileset(self, expr):
163 163 return fileset.getfileset(self, expr)
164 164
165 165 def obsolete(self):
166 166 """True if the changeset is obsolete"""
167 167 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
168 168
169 169 def extinct(self):
170 170 """True if the changeset is extinct"""
171 171 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
172 172
173 173 def unstable(self):
174 174 """True if the changeset is not obsolete but it's ancestor are"""
175 175 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
176 176
177 177 def bumped(self):
178 178 """True if the changeset try to be a successor of a public changeset
179 179
180 180 Only non-public and non-obsolete changesets may be bumped.
181 181 """
182 182 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
183 183
184 184 def divergent(self):
185 185 """Is a successors of a changeset with multiple possible successors set
186 186
187 187 Only non-public and non-obsolete changesets may be divergent.
188 188 """
189 189 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
190 190
191 191 def troubled(self):
192 192 """True if the changeset is either unstable, bumped or divergent"""
193 193 return self.unstable() or self.bumped() or self.divergent()
194 194
195 195 def troubles(self):
196 196 """return the list of troubles affecting this changesets.
197 197
198 198 Troubles are returned as strings. possible values are:
199 199 - unstable,
200 200 - bumped,
201 201 - divergent.
202 202 """
203 203 troubles = []
204 204 if self.unstable():
205 205 troubles.append('unstable')
206 206 if self.bumped():
207 207 troubles.append('bumped')
208 208 if self.divergent():
209 209 troubles.append('divergent')
210 210 return troubles
211 211
212 212 def parents(self):
213 213 """return contexts for each parent changeset"""
214 214 return self._parents
215 215
216 216 def p1(self):
217 217 return self._parents[0]
218 218
219 219 def p2(self):
220 220 if len(self._parents) == 2:
221 221 return self._parents[1]
222 222 return changectx(self._repo, -1)
223 223
224 224 def _fileinfo(self, path):
225 225 if '_manifest' in self.__dict__:
226 226 try:
227 227 return self._manifest[path], self._manifest.flags(path)
228 228 except KeyError:
229 229 raise error.ManifestLookupError(self._node, path,
230 230 _('not found in manifest'))
231 231 if '_manifestdelta' in self.__dict__ or path in self.files():
232 232 if path in self._manifestdelta:
233 233 return (self._manifestdelta[path],
234 234 self._manifestdelta.flags(path))
235 235 node, flag = self._repo.manifest.find(self._changeset[0], path)
236 236 if not node:
237 237 raise error.ManifestLookupError(self._node, path,
238 238 _('not found in manifest'))
239 239
240 240 return node, flag
241 241
242 242 def filenode(self, path):
243 243 return self._fileinfo(path)[0]
244 244
245 245 def flags(self, path):
246 246 try:
247 247 return self._fileinfo(path)[1]
248 248 except error.LookupError:
249 249 return ''
250 250
251 251 def sub(self, path):
252 252 return subrepo.subrepo(self, path)
253 253
254 254 def nullsub(self, path, pctx):
255 255 return subrepo.nullsubrepo(self, path, pctx)
256 256
257 257 def match(self, pats=[], include=None, exclude=None, default='glob',
258 listsubrepos=False):
258 listsubrepos=False, badfn=None):
259 259 r = self._repo
260 260 return matchmod.match(r.root, r.getcwd(), pats,
261 261 include, exclude, default,
262 262 auditor=r.auditor, ctx=self,
263 listsubrepos=listsubrepos)
263 listsubrepos=listsubrepos, badfn=badfn)
264 264
265 265 def diff(self, ctx2=None, match=None, **opts):
266 266 """Returns a diff generator for the given contexts and matcher"""
267 267 if ctx2 is None:
268 268 ctx2 = self.p1()
269 269 if ctx2 is not None:
270 270 ctx2 = self._repo[ctx2]
271 271 diffopts = patch.diffopts(self._repo.ui, opts)
272 272 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
273 273
274 274 def dirs(self):
275 275 return self._manifest.dirs()
276 276
277 277 def hasdir(self, dir):
278 278 return self._manifest.hasdir(dir)
279 279
280 280 def dirty(self, missing=False, merge=True, branch=True):
281 281 return False
282 282
283 283 def status(self, other=None, match=None, listignored=False,
284 284 listclean=False, listunknown=False, listsubrepos=False):
285 285 """return status of files between two nodes or node and working
286 286 directory.
287 287
288 288 If other is None, compare this node with working directory.
289 289
290 290 returns (modified, added, removed, deleted, unknown, ignored, clean)
291 291 """
292 292
293 293 ctx1 = self
294 294 ctx2 = self._repo[other]
295 295
296 296 # This next code block is, admittedly, fragile logic that tests for
297 297 # reversing the contexts and wouldn't need to exist if it weren't for
298 298 # the fast (and common) code path of comparing the working directory
299 299 # with its first parent.
300 300 #
301 301 # What we're aiming for here is the ability to call:
302 302 #
303 303 # workingctx.status(parentctx)
304 304 #
305 305 # If we always built the manifest for each context and compared those,
306 306 # then we'd be done. But the special case of the above call means we
307 307 # just copy the manifest of the parent.
308 308 reversed = False
309 309 if (not isinstance(ctx1, changectx)
310 310 and isinstance(ctx2, changectx)):
311 311 reversed = True
312 312 ctx1, ctx2 = ctx2, ctx1
313 313
314 314 match = ctx2._matchstatus(ctx1, match)
315 315 r = scmutil.status([], [], [], [], [], [], [])
316 316 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
317 317 listunknown)
318 318
319 319 if reversed:
320 320 # Reverse added and removed. Clear deleted, unknown and ignored as
321 321 # these make no sense to reverse.
322 322 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
323 323 r.clean)
324 324
325 325 if listsubrepos:
326 326 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
327 327 rev2 = ctx2.subrev(subpath)
328 328 try:
329 329 submatch = matchmod.narrowmatcher(subpath, match)
330 330 s = sub.status(rev2, match=submatch, ignored=listignored,
331 331 clean=listclean, unknown=listunknown,
332 332 listsubrepos=True)
333 333 for rfiles, sfiles in zip(r, s):
334 334 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
335 335 except error.LookupError:
336 336 self._repo.ui.status(_("skipping missing "
337 337 "subrepository: %s\n") % subpath)
338 338
339 339 for l in r:
340 340 l.sort()
341 341
342 342 return r
343 343
344 344
345 345 def makememctx(repo, parents, text, user, date, branch, files, store,
346 346 editor=None, extra=None):
347 347 def getfilectx(repo, memctx, path):
348 348 data, mode, copied = store.getfile(path)
349 349 if data is None:
350 350 return None
351 351 islink, isexec = mode
352 352 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
353 353 copied=copied, memctx=memctx)
354 354 if extra is None:
355 355 extra = {}
356 356 if branch:
357 357 extra['branch'] = encoding.fromlocal(branch)
358 358 ctx = memctx(repo, parents, text, files, getfilectx, user,
359 359 date, extra, editor)
360 360 return ctx
361 361
362 362 class changectx(basectx):
363 363 """A changecontext object makes access to data related to a particular
364 364 changeset convenient. It represents a read-only context already present in
365 365 the repo."""
366 366 def __init__(self, repo, changeid=''):
367 367 """changeid is a revision number, node, or tag"""
368 368
369 369 # since basectx.__new__ already took care of copying the object, we
370 370 # don't need to do anything in __init__, so we just exit here
371 371 if isinstance(changeid, basectx):
372 372 return
373 373
374 374 if changeid == '':
375 375 changeid = '.'
376 376 self._repo = repo
377 377
378 378 try:
379 379 if isinstance(changeid, int):
380 380 self._node = repo.changelog.node(changeid)
381 381 self._rev = changeid
382 382 return
383 383 if isinstance(changeid, long):
384 384 changeid = str(changeid)
385 385 if changeid == 'null':
386 386 self._node = nullid
387 387 self._rev = nullrev
388 388 return
389 389 if changeid == 'tip':
390 390 self._node = repo.changelog.tip()
391 391 self._rev = repo.changelog.rev(self._node)
392 392 return
393 393 if changeid == '.' or changeid == repo.dirstate.p1():
394 394 # this is a hack to delay/avoid loading obsmarkers
395 395 # when we know that '.' won't be hidden
396 396 self._node = repo.dirstate.p1()
397 397 self._rev = repo.unfiltered().changelog.rev(self._node)
398 398 return
399 399 if len(changeid) == 20:
400 400 try:
401 401 self._node = changeid
402 402 self._rev = repo.changelog.rev(changeid)
403 403 return
404 404 except error.FilteredRepoLookupError:
405 405 raise
406 406 except LookupError:
407 407 pass
408 408
409 409 try:
410 410 r = int(changeid)
411 411 if str(r) != changeid:
412 412 raise ValueError
413 413 l = len(repo.changelog)
414 414 if r < 0:
415 415 r += l
416 416 if r < 0 or r >= l:
417 417 raise ValueError
418 418 self._rev = r
419 419 self._node = repo.changelog.node(r)
420 420 return
421 421 except error.FilteredIndexError:
422 422 raise
423 423 except (ValueError, OverflowError, IndexError):
424 424 pass
425 425
426 426 if len(changeid) == 40:
427 427 try:
428 428 self._node = bin(changeid)
429 429 self._rev = repo.changelog.rev(self._node)
430 430 return
431 431 except error.FilteredLookupError:
432 432 raise
433 433 except (TypeError, LookupError):
434 434 pass
435 435
436 436 # lookup bookmarks through the name interface
437 437 try:
438 438 self._node = repo.names.singlenode(repo, changeid)
439 439 self._rev = repo.changelog.rev(self._node)
440 440 return
441 441 except KeyError:
442 442 pass
443 443 except error.FilteredRepoLookupError:
444 444 raise
445 445 except error.RepoLookupError:
446 446 pass
447 447
448 448 self._node = repo.unfiltered().changelog._partialmatch(changeid)
449 449 if self._node is not None:
450 450 self._rev = repo.changelog.rev(self._node)
451 451 return
452 452
453 453 # lookup failed
454 454 # check if it might have come from damaged dirstate
455 455 #
456 456 # XXX we could avoid the unfiltered if we had a recognizable
457 457 # exception for filtered changeset access
458 458 if changeid in repo.unfiltered().dirstate.parents():
459 459 msg = _("working directory has unknown parent '%s'!")
460 460 raise error.Abort(msg % short(changeid))
461 461 try:
462 462 if len(changeid) == 20:
463 463 changeid = hex(changeid)
464 464 except TypeError:
465 465 pass
466 466 except (error.FilteredIndexError, error.FilteredLookupError,
467 467 error.FilteredRepoLookupError):
468 468 if repo.filtername.startswith('visible'):
469 469 msg = _("hidden revision '%s'") % changeid
470 470 hint = _('use --hidden to access hidden revisions')
471 471 raise error.FilteredRepoLookupError(msg, hint=hint)
472 472 msg = _("filtered revision '%s' (not in '%s' subset)")
473 473 msg %= (changeid, repo.filtername)
474 474 raise error.FilteredRepoLookupError(msg)
475 475 except IndexError:
476 476 pass
477 477 raise error.RepoLookupError(
478 478 _("unknown revision '%s'") % changeid)
479 479
480 480 def __hash__(self):
481 481 try:
482 482 return hash(self._rev)
483 483 except AttributeError:
484 484 return id(self)
485 485
486 486 def __nonzero__(self):
487 487 return self._rev != nullrev
488 488
489 489 @propertycache
490 490 def _changeset(self):
491 491 return self._repo.changelog.read(self.rev())
492 492
493 493 @propertycache
494 494 def _manifest(self):
495 495 return self._repo.manifest.read(self._changeset[0])
496 496
497 497 @propertycache
498 498 def _manifestdelta(self):
499 499 return self._repo.manifest.readdelta(self._changeset[0])
500 500
501 501 @propertycache
502 502 def _parents(self):
503 503 p = self._repo.changelog.parentrevs(self._rev)
504 504 if p[1] == nullrev:
505 505 p = p[:-1]
506 506 return [changectx(self._repo, x) for x in p]
507 507
508 508 def changeset(self):
509 509 return self._changeset
510 510 def manifestnode(self):
511 511 return self._changeset[0]
512 512
513 513 def user(self):
514 514 return self._changeset[1]
515 515 def date(self):
516 516 return self._changeset[2]
517 517 def files(self):
518 518 return self._changeset[3]
519 519 def description(self):
520 520 return self._changeset[4]
521 521 def branch(self):
522 522 return encoding.tolocal(self._changeset[5].get("branch"))
523 523 def closesbranch(self):
524 524 return 'close' in self._changeset[5]
525 525 def extra(self):
526 526 return self._changeset[5]
527 527 def tags(self):
528 528 return self._repo.nodetags(self._node)
529 529 def bookmarks(self):
530 530 return self._repo.nodebookmarks(self._node)
531 531 def phase(self):
532 532 return self._repo._phasecache.phase(self._repo, self._rev)
533 533 def hidden(self):
534 534 return self._rev in repoview.filterrevs(self._repo, 'visible')
535 535
536 536 def children(self):
537 537 """return contexts for each child changeset"""
538 538 c = self._repo.changelog.children(self._node)
539 539 return [changectx(self._repo, x) for x in c]
540 540
541 541 def ancestors(self):
542 542 for a in self._repo.changelog.ancestors([self._rev]):
543 543 yield changectx(self._repo, a)
544 544
545 545 def descendants(self):
546 546 for d in self._repo.changelog.descendants([self._rev]):
547 547 yield changectx(self._repo, d)
548 548
549 549 def filectx(self, path, fileid=None, filelog=None):
550 550 """get a file context from this changeset"""
551 551 if fileid is None:
552 552 fileid = self.filenode(path)
553 553 return filectx(self._repo, path, fileid=fileid,
554 554 changectx=self, filelog=filelog)
555 555
556 556 def ancestor(self, c2, warn=False):
557 557 """return the "best" ancestor context of self and c2
558 558
559 559 If there are multiple candidates, it will show a message and check
560 560 merge.preferancestor configuration before falling back to the
561 561 revlog ancestor."""
562 562 # deal with workingctxs
563 563 n2 = c2._node
564 564 if n2 is None:
565 565 n2 = c2._parents[0]._node
566 566 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
567 567 if not cahs:
568 568 anc = nullid
569 569 elif len(cahs) == 1:
570 570 anc = cahs[0]
571 571 else:
572 572 for r in self._repo.ui.configlist('merge', 'preferancestor'):
573 573 try:
574 574 ctx = changectx(self._repo, r)
575 575 except error.RepoLookupError:
576 576 continue
577 577 anc = ctx.node()
578 578 if anc in cahs:
579 579 break
580 580 else:
581 581 anc = self._repo.changelog.ancestor(self._node, n2)
582 582 if warn:
583 583 self._repo.ui.status(
584 584 (_("note: using %s as ancestor of %s and %s\n") %
585 585 (short(anc), short(self._node), short(n2))) +
586 586 ''.join(_(" alternatively, use --config "
587 587 "merge.preferancestor=%s\n") %
588 588 short(n) for n in sorted(cahs) if n != anc))
589 589 return changectx(self._repo, anc)
590 590
591 591 def descendant(self, other):
592 592 """True if other is descendant of this changeset"""
593 593 return self._repo.changelog.descendant(self._rev, other._rev)
594 594
595 595 def walk(self, match):
596 596 '''Generates matching file names.'''
597 597
598 598 # Wrap match.bad method to have message with nodeid
599 599 def bad(fn, msg):
600 600 # The manifest doesn't know about subrepos, so don't complain about
601 601 # paths into valid subrepos.
602 602 if any(fn == s or fn.startswith(s + '/')
603 603 for s in self.substate):
604 604 return
605 605 match.bad(fn, _('no such file in rev %s') % self)
606 606
607 607 m = matchmod.badmatch(match, bad)
608 608 return self._manifest.walk(m)
609 609
610 610 def matches(self, match):
611 611 return self.walk(match)
612 612
613 613 class basefilectx(object):
614 614 """A filecontext object represents the common logic for its children:
615 615 filectx: read-only access to a filerevision that is already present
616 616 in the repo,
617 617 workingfilectx: a filecontext that represents files from the working
618 618 directory,
619 619 memfilectx: a filecontext that represents files in-memory."""
620 620 def __new__(cls, repo, path, *args, **kwargs):
621 621 return super(basefilectx, cls).__new__(cls)
622 622
623 623 @propertycache
624 624 def _filelog(self):
625 625 return self._repo.file(self._path)
626 626
627 627 @propertycache
628 628 def _changeid(self):
629 629 if '_changeid' in self.__dict__:
630 630 return self._changeid
631 631 elif '_changectx' in self.__dict__:
632 632 return self._changectx.rev()
633 633 elif '_descendantrev' in self.__dict__:
634 634 # this file context was created from a revision with a known
635 635 # descendant, we can (lazily) correct for linkrev aliases
636 636 return self._adjustlinkrev(self._path, self._filelog,
637 637 self._filenode, self._descendantrev)
638 638 else:
639 639 return self._filelog.linkrev(self._filerev)
640 640
641 641 @propertycache
642 642 def _filenode(self):
643 643 if '_fileid' in self.__dict__:
644 644 return self._filelog.lookup(self._fileid)
645 645 else:
646 646 return self._changectx.filenode(self._path)
647 647
648 648 @propertycache
649 649 def _filerev(self):
650 650 return self._filelog.rev(self._filenode)
651 651
652 652 @propertycache
653 653 def _repopath(self):
654 654 return self._path
655 655
656 656 def __nonzero__(self):
657 657 try:
658 658 self._filenode
659 659 return True
660 660 except error.LookupError:
661 661 # file is missing
662 662 return False
663 663
664 664 def __str__(self):
665 665 return "%s@%s" % (self.path(), self._changectx)
666 666
667 667 def __repr__(self):
668 668 return "<%s %s>" % (type(self).__name__, str(self))
669 669
670 670 def __hash__(self):
671 671 try:
672 672 return hash((self._path, self._filenode))
673 673 except AttributeError:
674 674 return id(self)
675 675
676 676 def __eq__(self, other):
677 677 try:
678 678 return (type(self) == type(other) and self._path == other._path
679 679 and self._filenode == other._filenode)
680 680 except AttributeError:
681 681 return False
682 682
683 683 def __ne__(self, other):
684 684 return not (self == other)
685 685
686 686 def filerev(self):
687 687 return self._filerev
688 688 def filenode(self):
689 689 return self._filenode
690 690 def flags(self):
691 691 return self._changectx.flags(self._path)
692 692 def filelog(self):
693 693 return self._filelog
694 694 def rev(self):
695 695 return self._changeid
696 696 def linkrev(self):
697 697 return self._filelog.linkrev(self._filerev)
698 698 def node(self):
699 699 return self._changectx.node()
700 700 def hex(self):
701 701 return self._changectx.hex()
702 702 def user(self):
703 703 return self._changectx.user()
704 704 def date(self):
705 705 return self._changectx.date()
706 706 def files(self):
707 707 return self._changectx.files()
708 708 def description(self):
709 709 return self._changectx.description()
710 710 def branch(self):
711 711 return self._changectx.branch()
712 712 def extra(self):
713 713 return self._changectx.extra()
714 714 def phase(self):
715 715 return self._changectx.phase()
716 716 def phasestr(self):
717 717 return self._changectx.phasestr()
718 718 def manifest(self):
719 719 return self._changectx.manifest()
720 720 def changectx(self):
721 721 return self._changectx
722 722 def repo(self):
723 723 return self._repo
724 724
725 725 def path(self):
726 726 return self._path
727 727
728 728 def isbinary(self):
729 729 try:
730 730 return util.binary(self.data())
731 731 except IOError:
732 732 return False
733 733 def isexec(self):
734 734 return 'x' in self.flags()
735 735 def islink(self):
736 736 return 'l' in self.flags()
737 737
738 738 def cmp(self, fctx):
739 739 """compare with other file context
740 740
741 741 returns True if different than fctx.
742 742 """
743 743 if (fctx._filerev is None
744 744 and (self._repo._encodefilterpats
745 745 # if file data starts with '\1\n', empty metadata block is
746 746 # prepended, which adds 4 bytes to filelog.size().
747 747 or self.size() - 4 == fctx.size())
748 748 or self.size() == fctx.size()):
749 749 return self._filelog.cmp(self._filenode, fctx.data())
750 750
751 751 return True
752 752
753 753 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
754 754 """return the first ancestor of <srcrev> introducing <fnode>
755 755
756 756 If the linkrev of the file revision does not point to an ancestor of
757 757 srcrev, we'll walk down the ancestors until we find one introducing
758 758 this file revision.
759 759
760 760 :repo: a localrepository object (used to access changelog and manifest)
761 761 :path: the file path
762 762 :fnode: the nodeid of the file revision
763 763 :filelog: the filelog of this path
764 764 :srcrev: the changeset revision we search ancestors from
765 765 :inclusive: if true, the src revision will also be checked
766 766 """
767 767 repo = self._repo
768 768 cl = repo.unfiltered().changelog
769 769 ma = repo.manifest
770 770 # fetch the linkrev
771 771 fr = filelog.rev(fnode)
772 772 lkr = filelog.linkrev(fr)
773 773 # hack to reuse ancestor computation when searching for renames
774 774 memberanc = getattr(self, '_ancestrycontext', None)
775 775 iteranc = None
776 776 if srcrev is None:
777 777 # wctx case, used by workingfilectx during mergecopy
778 778 revs = [p.rev() for p in self._repo[None].parents()]
779 779 inclusive = True # we skipped the real (revless) source
780 780 else:
781 781 revs = [srcrev]
782 782 if memberanc is None:
783 783 memberanc = iteranc = cl.ancestors(revs, lkr,
784 784 inclusive=inclusive)
785 785 # check if this linkrev is an ancestor of srcrev
786 786 if lkr not in memberanc:
787 787 if iteranc is None:
788 788 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
789 789 for a in iteranc:
790 790 ac = cl.read(a) # get changeset data (we avoid object creation)
791 791 if path in ac[3]: # checking the 'files' field.
792 792 # The file has been touched, check if the content is
793 793 # similar to the one we search for.
794 794 if fnode == ma.readfast(ac[0]).get(path):
795 795 return a
796 796 # In theory, we should never get out of that loop without a result.
797 797 # But if the manifest uses a buggy file revision (not a child of the
798 798 # one it replaces) we could. Such a buggy situation will likely
799 799 # result in a crash somewhere else at some point.
800 800 return lkr
801 801
802 802 def introrev(self):
803 803 """return the rev of the changeset which introduced this file revision
804 804
805 805 This method is different from linkrev because it takes into account the
806 806 changeset the filectx was created from. It ensures the returned
807 807 revision is one of its ancestors. This prevents bugs from
808 808 'linkrev-shadowing' when a file revision is used by multiple
809 809 changesets.
810 810 """
811 811 lkr = self.linkrev()
812 812 attrs = vars(self)
813 813 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
814 814 if noctx or self.rev() == lkr:
815 815 return self.linkrev()
816 816 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
817 817 self.rev(), inclusive=True)
818 818
819 819 def _parentfilectx(self, path, fileid, filelog):
820 820 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
821 821 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
822 822 if '_changeid' in vars(self) or '_changectx' in vars(self):
823 823 # If self is associated with a changeset (probably explicitly
824 824 # fed), ensure the created filectx is associated with a
825 825 # changeset that is an ancestor of self.changectx.
826 826 # This lets us later use _adjustlinkrev to get a correct link.
827 827 fctx._descendantrev = self.rev()
828 828 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
829 829 elif '_descendantrev' in vars(self):
830 830 # Otherwise propagate _descendantrev if we have one associated.
831 831 fctx._descendantrev = self._descendantrev
832 832 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
833 833 return fctx
834 834
835 835 def parents(self):
836 836 _path = self._path
837 837 fl = self._filelog
838 838 parents = self._filelog.parents(self._filenode)
839 839 pl = [(_path, node, fl) for node in parents if node != nullid]
840 840
841 841 r = fl.renamed(self._filenode)
842 842 if r:
843 843 # - In the simple rename case, both parents are nullid, pl is empty.
844 844 # - In case of merge, only one of the parents is nullid and should
845 845 # be replaced with the rename information. This parent is -always-
846 846 # the first one.
847 847 #
848 848 # As nullid has always been filtered out in the previous list
849 849 # comprehension, inserting at 0 will always result in replacing the
850 850 # first nullid parent with rename information.
851 851 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
852 852
853 853 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
854 854
855 855 def p1(self):
856 856 return self.parents()[0]
857 857
858 858 def p2(self):
859 859 p = self.parents()
860 860 if len(p) == 2:
861 861 return p[1]
862 862 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
863 863
864 864 def annotate(self, follow=False, linenumber=None, diffopts=None):
865 865 '''returns a list of tuples of (ctx, line) for each line
866 866 in the file, where ctx is the filectx of the node where
867 867 that line was last changed.
868 868 This returns tuples of ((ctx, linenumber), line) for each line,
869 869 if "linenumber" parameter is NOT "None".
870 870 In such tuples, linenumber means one at the first appearance
871 871 in the managed file.
872 872 To reduce annotation cost,
873 873 this returns fixed value(False is used) as linenumber,
874 874 if "linenumber" parameter is "False".'''
875 875
876 876 if linenumber is None:
877 877 def decorate(text, rev):
878 878 return ([rev] * len(text.splitlines()), text)
879 879 elif linenumber:
880 880 def decorate(text, rev):
881 881 size = len(text.splitlines())
882 882 return ([(rev, i) for i in xrange(1, size + 1)], text)
883 883 else:
884 884 def decorate(text, rev):
885 885 return ([(rev, False)] * len(text.splitlines()), text)
886 886
887 887 def pair(parent, child):
888 888 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
889 889 refine=True)
890 890 for (a1, a2, b1, b2), t in blocks:
891 891 # Changed blocks ('!') or blocks made only of blank lines ('~')
892 892 # belong to the child.
893 893 if t == '=':
894 894 child[0][b1:b2] = parent[0][a1:a2]
895 895 return child
896 896
897 897 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
898 898
899 899 def parents(f):
900 900 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
901 901 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
902 902 # from the topmost introrev (= srcrev) down to p.linkrev() if it
903 903 # isn't an ancestor of the srcrev.
904 904 f._changeid
905 905 pl = f.parents()
906 906
907 907 # Don't return renamed parents if we aren't following.
908 908 if not follow:
909 909 pl = [p for p in pl if p.path() == f.path()]
910 910
911 911 # renamed filectx won't have a filelog yet, so set it
912 912 # from the cache to save time
913 913 for p in pl:
914 914 if not '_filelog' in p.__dict__:
915 915 p._filelog = getlog(p.path())
916 916
917 917 return pl
918 918
919 919 # use linkrev to find the first changeset where self appeared
920 920 base = self
921 921 introrev = self.introrev()
922 922 if self.rev() != introrev:
923 923 base = self.filectx(self.filenode(), changeid=introrev)
924 924 if getattr(base, '_ancestrycontext', None) is None:
925 925 cl = self._repo.changelog
926 926 if introrev is None:
927 927 # wctx is not inclusive, but works because _ancestrycontext
928 928 # is used to test filelog revisions
929 929 ac = cl.ancestors([p.rev() for p in base.parents()],
930 930 inclusive=True)
931 931 else:
932 932 ac = cl.ancestors([introrev], inclusive=True)
933 933 base._ancestrycontext = ac
934 934
935 935 # This algorithm would prefer to be recursive, but Python is a
936 936 # bit recursion-hostile. Instead we do an iterative
937 937 # depth-first search.
938 938
939 939 visit = [base]
940 940 hist = {}
941 941 pcache = {}
942 942 needed = {base: 1}
943 943 while visit:
944 944 f = visit[-1]
945 945 pcached = f in pcache
946 946 if not pcached:
947 947 pcache[f] = parents(f)
948 948
949 949 ready = True
950 950 pl = pcache[f]
951 951 for p in pl:
952 952 if p not in hist:
953 953 ready = False
954 954 visit.append(p)
955 955 if not pcached:
956 956 needed[p] = needed.get(p, 0) + 1
957 957 if ready:
958 958 visit.pop()
959 959 reusable = f in hist
960 960 if reusable:
961 961 curr = hist[f]
962 962 else:
963 963 curr = decorate(f.data(), f)
964 964 for p in pl:
965 965 if not reusable:
966 966 curr = pair(hist[p], curr)
967 967 if needed[p] == 1:
968 968 del hist[p]
969 969 del needed[p]
970 970 else:
971 971 needed[p] -= 1
972 972
973 973 hist[f] = curr
974 974 pcache[f] = []
975 975
976 976 return zip(hist[base][0], hist[base][1].splitlines(True))
977 977
978 978 def ancestors(self, followfirst=False):
979 979 visit = {}
980 980 c = self
981 981 if followfirst:
982 982 cut = 1
983 983 else:
984 984 cut = None
985 985
986 986 while True:
987 987 for parent in c.parents()[:cut]:
988 988 visit[(parent.linkrev(), parent.filenode())] = parent
989 989 if not visit:
990 990 break
991 991 c = visit.pop(max(visit))
992 992 yield c
993 993
994 994 class filectx(basefilectx):
995 995 """A filecontext object makes access to data related to a particular
996 996 filerevision convenient."""
997 997 def __init__(self, repo, path, changeid=None, fileid=None,
998 998 filelog=None, changectx=None):
999 999 """changeid can be a changeset revision, node, or tag.
1000 1000 fileid can be a file revision or node."""
1001 1001 self._repo = repo
1002 1002 self._path = path
1003 1003
1004 1004 assert (changeid is not None
1005 1005 or fileid is not None
1006 1006 or changectx is not None), \
1007 1007 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1008 1008 % (changeid, fileid, changectx))
1009 1009
1010 1010 if filelog is not None:
1011 1011 self._filelog = filelog
1012 1012
1013 1013 if changeid is not None:
1014 1014 self._changeid = changeid
1015 1015 if changectx is not None:
1016 1016 self._changectx = changectx
1017 1017 if fileid is not None:
1018 1018 self._fileid = fileid
1019 1019
1020 1020 @propertycache
1021 1021 def _changectx(self):
1022 1022 try:
1023 1023 return changectx(self._repo, self._changeid)
1024 1024 except error.FilteredRepoLookupError:
1025 1025 # Linkrev may point to any revision in the repository. When the
1026 1026 # repository is filtered this may lead to `filectx` trying to build
1027 1027 # `changectx` for a filtered revision. In such a case we fall back to
1028 1028 # creating `changectx` on the unfiltered version of the repository.
1029 1029 # This fallback should not be an issue because `changectx` from
1030 1030 # `filectx` are not used in complex operations that care about
1031 1031 # filtering.
1032 1032 #
1033 1033 # This fallback is a cheap and dirty fix that prevents several
1034 1034 # crashes. It does not ensure the behavior is correct. However the
1035 1035 # behavior was not correct before filtering either and "incorrect
1036 1036 # behavior" is seen as better than "crash".
1037 1037 #
1038 1038 # Linkrevs have several serious troubles with filtering that are
1039 1039 # complicated to solve. Proper handling of the issue here should be
1040 1040 # considered once solving the linkrev issue is on the table.
1041 1041 return changectx(self._repo.unfiltered(), self._changeid)
1042 1042
1043 1043 def filectx(self, fileid, changeid=None):
1044 1044 '''opens an arbitrary revision of the file without
1045 1045 opening a new filelog'''
1046 1046 return filectx(self._repo, self._path, fileid=fileid,
1047 1047 filelog=self._filelog, changeid=changeid)
1048 1048
1049 1049 def data(self):
1050 1050 try:
1051 1051 return self._filelog.read(self._filenode)
1052 1052 except error.CensoredNodeError:
1053 1053 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1054 1054 return ""
1055 1055 raise util.Abort(_("censored node: %s") % short(self._filenode),
1056 1056 hint=_("set censor.policy to ignore errors"))
1057 1057
1058 1058 def size(self):
1059 1059 return self._filelog.size(self._filerev)
1060 1060
1061 1061 def renamed(self):
1062 1062 """check if file was actually renamed in this changeset revision
1063 1063
1064 1064 If a rename is logged in the file revision, we report the copy for the
1065 1065 changeset only if the file revision's linkrev points back to the changeset
1066 1066 in question or both changeset parents contain different file revisions.
1067 1067 """
1068 1068
1069 1069 renamed = self._filelog.renamed(self._filenode)
1070 1070 if not renamed:
1071 1071 return renamed
1072 1072
1073 1073 if self.rev() == self.linkrev():
1074 1074 return renamed
1075 1075
1076 1076 name = self.path()
1077 1077 fnode = self._filenode
1078 1078 for p in self._changectx.parents():
1079 1079 try:
1080 1080 if fnode == p.filenode(name):
1081 1081 return None
1082 1082 except error.LookupError:
1083 1083 pass
1084 1084 return renamed
1085 1085
1086 1086 def children(self):
1087 1087 # hard for renames
1088 1088 c = self._filelog.children(self._filenode)
1089 1089 return [filectx(self._repo, self._path, fileid=x,
1090 1090 filelog=self._filelog) for x in c]
1091 1091
1092 1092 class committablectx(basectx):
1093 1093 """A committablectx object provides common functionality for a context that
1094 1094 wants the ability to commit, e.g. workingctx or memctx."""
1095 1095 def __init__(self, repo, text="", user=None, date=None, extra=None,
1096 1096 changes=None):
1097 1097 self._repo = repo
1098 1098 self._rev = None
1099 1099 self._node = None
1100 1100 self._text = text
1101 1101 if date:
1102 1102 self._date = util.parsedate(date)
1103 1103 if user:
1104 1104 self._user = user
1105 1105 if changes:
1106 1106 self._status = changes
1107 1107
1108 1108 self._extra = {}
1109 1109 if extra:
1110 1110 self._extra = extra.copy()
1111 1111 if 'branch' not in self._extra:
1112 1112 try:
1113 1113 branch = encoding.fromlocal(self._repo.dirstate.branch())
1114 1114 except UnicodeDecodeError:
1115 1115 raise util.Abort(_('branch name not in UTF-8!'))
1116 1116 self._extra['branch'] = branch
1117 1117 if self._extra['branch'] == '':
1118 1118 self._extra['branch'] = 'default'
1119 1119
1120 1120 def __str__(self):
1121 1121 return str(self._parents[0]) + "+"
1122 1122
1123 1123 def __nonzero__(self):
1124 1124 return True
1125 1125
1126 1126 def _buildflagfunc(self):
1127 1127 # Create a fallback function for getting file flags when the
1128 1128 # filesystem doesn't support them
1129 1129
1130 1130 copiesget = self._repo.dirstate.copies().get
1131 1131
1132 1132 if len(self._parents) < 2:
1133 1133 # when we have one parent, it's easy: copy from parent
1134 1134 man = self._parents[0].manifest()
1135 1135 def func(f):
1136 1136 f = copiesget(f, f)
1137 1137 return man.flags(f)
1138 1138 else:
1139 1139 # merges are tricky: we try to reconstruct the unstored
1140 1140 # result from the merge (issue1802)
1141 1141 p1, p2 = self._parents
1142 1142 pa = p1.ancestor(p2)
1143 1143 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1144 1144
1145 1145 def func(f):
1146 1146 f = copiesget(f, f) # may be wrong for merges with copies
1147 1147 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1148 1148 if fl1 == fl2:
1149 1149 return fl1
1150 1150 if fl1 == fla:
1151 1151 return fl2
1152 1152 if fl2 == fla:
1153 1153 return fl1
1154 1154 return '' # punt for conflicts
1155 1155
1156 1156 return func
1157 1157
1158 1158 @propertycache
1159 1159 def _flagfunc(self):
1160 1160 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1161 1161
1162 1162 @propertycache
1163 1163 def _manifest(self):
1164 1164 """generate a manifest corresponding to the values in self._status
1165 1165
1166 1166 This reuses the file nodeid from the parent, but appends an extra letter
1167 1167 when modified. Modified files get an extra 'm' while added files get
1168 1168 an extra 'a'. This is used by manifest merge to see that files
1169 1169 are different and by update logic to avoid deleting newly added files.
1170 1170 """
1171 1171
1172 1172 man1 = self._parents[0].manifest()
1173 1173 man = man1.copy()
1174 1174 if len(self._parents) > 1:
1175 1175 man2 = self.p2().manifest()
1176 1176 def getman(f):
1177 1177 if f in man1:
1178 1178 return man1
1179 1179 return man2
1180 1180 else:
1181 1181 getman = lambda f: man1
1182 1182
1183 1183 copied = self._repo.dirstate.copies()
1184 1184 ff = self._flagfunc
1185 1185 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1186 1186 for f in l:
1187 1187 orig = copied.get(f, f)
1188 1188 man[f] = getman(orig).get(orig, nullid) + i
1189 1189 try:
1190 1190 man.setflag(f, ff(f))
1191 1191 except OSError:
1192 1192 pass
1193 1193
1194 1194 for f in self._status.deleted + self._status.removed:
1195 1195 if f in man:
1196 1196 del man[f]
1197 1197
1198 1198 return man
1199 1199
1200 1200 @propertycache
1201 1201 def _status(self):
1202 1202 return self._repo.status()
1203 1203
1204 1204 @propertycache
1205 1205 def _user(self):
1206 1206 return self._repo.ui.username()
1207 1207
1208 1208 @propertycache
1209 1209 def _date(self):
1210 1210 return util.makedate()
1211 1211
1212 1212 def subrev(self, subpath):
1213 1213 return None
1214 1214
1215 1215 def manifestnode(self):
1216 1216 return None
1217 1217 def user(self):
1218 1218 return self._user or self._repo.ui.username()
1219 1219 def date(self):
1220 1220 return self._date
1221 1221 def description(self):
1222 1222 return self._text
1223 1223 def files(self):
1224 1224 return sorted(self._status.modified + self._status.added +
1225 1225 self._status.removed)
1226 1226
1227 1227 def modified(self):
1228 1228 return self._status.modified
1229 1229 def added(self):
1230 1230 return self._status.added
1231 1231 def removed(self):
1232 1232 return self._status.removed
1233 1233 def deleted(self):
1234 1234 return self._status.deleted
1235 1235 def branch(self):
1236 1236 return encoding.tolocal(self._extra['branch'])
1237 1237 def closesbranch(self):
1238 1238 return 'close' in self._extra
1239 1239 def extra(self):
1240 1240 return self._extra
1241 1241
1242 1242 def tags(self):
1243 1243 t = []
1244 1244 for p in self.parents():
1245 1245 t.extend(p.tags())
1246 1246 return t
1247 1247
1248 1248 def bookmarks(self):
1249 1249 b = []
1250 1250 for p in self.parents():
1251 1251 b.extend(p.bookmarks())
1252 1252 return b
1253 1253
1254 1254 def phase(self):
1255 1255 phase = phases.draft # default phase to draft
1256 1256 for p in self.parents():
1257 1257 phase = max(phase, p.phase())
1258 1258 return phase
1259 1259
1260 1260 def hidden(self):
1261 1261 return False
1262 1262
1263 1263 def children(self):
1264 1264 return []
1265 1265
1266 1266 def flags(self, path):
1267 1267 if '_manifest' in self.__dict__:
1268 1268 try:
1269 1269 return self._manifest.flags(path)
1270 1270 except KeyError:
1271 1271 return ''
1272 1272
1273 1273 try:
1274 1274 return self._flagfunc(path)
1275 1275 except OSError:
1276 1276 return ''
1277 1277
1278 1278 def ancestor(self, c2):
1279 1279 """return the "best" ancestor context of self and c2"""
1280 1280 return self._parents[0].ancestor(c2) # punt on two parents for now
1281 1281
1282 1282 def walk(self, match):
1283 1283 '''Generates matching file names.'''
1284 1284 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1285 1285 True, False))
1286 1286
1287 1287 def matches(self, match):
1288 1288 return sorted(self._repo.dirstate.matches(match))
1289 1289
1290 1290 def ancestors(self):
1291 1291 for p in self._parents:
1292 1292 yield p
1293 1293 for a in self._repo.changelog.ancestors(
1294 1294 [p.rev() for p in self._parents]):
1295 1295 yield changectx(self._repo, a)
1296 1296
1297 1297 def markcommitted(self, node):
1298 1298 """Perform post-commit cleanup necessary after committing this ctx
1299 1299
1300 1300 Specifically, this updates backing stores this working context
1301 1301 wraps to reflect the fact that the changes reflected by this
1302 1302 workingctx have been committed. For example, it marks
1303 1303 modified and added files as normal in the dirstate.
1304 1304
1305 1305 """
1306 1306
1307 1307 self._repo.dirstate.beginparentchange()
1308 1308 for f in self.modified() + self.added():
1309 1309 self._repo.dirstate.normal(f)
1310 1310 for f in self.removed():
1311 1311 self._repo.dirstate.drop(f)
1312 1312 self._repo.dirstate.setparents(node)
1313 1313 self._repo.dirstate.endparentchange()
1314 1314
1315 1315 class workingctx(committablectx):
1316 1316 """A workingctx object makes access to data related to
1317 1317 the current working directory convenient.
1318 1318 date - any valid date string or (unixtime, offset), or None.
1319 1319 user - username string, or None.
1320 1320 extra - a dictionary of extra values, or None.
1321 1321 changes - a list of file lists as returned by localrepo.status()
1322 1322 or None to use the repository status.
1323 1323 """
1324 1324 def __init__(self, repo, text="", user=None, date=None, extra=None,
1325 1325 changes=None):
1326 1326 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1327 1327
1328 1328 def __iter__(self):
1329 1329 d = self._repo.dirstate
1330 1330 for f in d:
1331 1331 if d[f] != 'r':
1332 1332 yield f
1333 1333
1334 1334 def __contains__(self, key):
1335 1335 return self._repo.dirstate[key] not in "?r"
1336 1336
1337 1337 @propertycache
1338 1338 def _parents(self):
1339 1339 p = self._repo.dirstate.parents()
1340 1340 if p[1] == nullid:
1341 1341 p = p[:-1]
1342 1342 return [changectx(self._repo, x) for x in p]
1343 1343
1344 1344 def filectx(self, path, filelog=None):
1345 1345 """get a file context from the working directory"""
1346 1346 return workingfilectx(self._repo, path, workingctx=self,
1347 1347 filelog=filelog)
1348 1348
1349 1349 def dirty(self, missing=False, merge=True, branch=True):
1350 1350 "check whether a working directory is modified"
1351 1351 # check subrepos first
1352 1352 for s in sorted(self.substate):
1353 1353 if self.sub(s).dirty():
1354 1354 return True
1355 1355 # check current working dir
1356 1356 return ((merge and self.p2()) or
1357 1357 (branch and self.branch() != self.p1().branch()) or
1358 1358 self.modified() or self.added() or self.removed() or
1359 1359 (missing and self.deleted()))
1360 1360
1361 1361 def add(self, list, prefix=""):
1362 1362 join = lambda f: os.path.join(prefix, f)
1363 1363 wlock = self._repo.wlock()
1364 1364 ui, ds = self._repo.ui, self._repo.dirstate
1365 1365 try:
1366 1366 rejected = []
1367 1367 lstat = self._repo.wvfs.lstat
1368 1368 for f in list:
1369 1369 scmutil.checkportable(ui, join(f))
1370 1370 try:
1371 1371 st = lstat(f)
1372 1372 except OSError:
1373 1373 ui.warn(_("%s does not exist!\n") % join(f))
1374 1374 rejected.append(f)
1375 1375 continue
1376 1376 if st.st_size > 10000000:
1377 1377 ui.warn(_("%s: up to %d MB of RAM may be required "
1378 1378 "to manage this file\n"
1379 1379 "(use 'hg revert %s' to cancel the "
1380 1380 "pending addition)\n")
1381 1381 % (f, 3 * st.st_size // 1000000, join(f)))
1382 1382 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1383 1383 ui.warn(_("%s not added: only files and symlinks "
1384 1384 "supported currently\n") % join(f))
1385 1385 rejected.append(f)
1386 1386 elif ds[f] in 'amn':
1387 1387 ui.warn(_("%s already tracked!\n") % join(f))
1388 1388 elif ds[f] == 'r':
1389 1389 ds.normallookup(f)
1390 1390 else:
1391 1391 ds.add(f)
1392 1392 return rejected
1393 1393 finally:
1394 1394 wlock.release()
1395 1395
1396 1396 def forget(self, files, prefix=""):
1397 1397 join = lambda f: os.path.join(prefix, f)
1398 1398 wlock = self._repo.wlock()
1399 1399 try:
1400 1400 rejected = []
1401 1401 for f in files:
1402 1402 if f not in self._repo.dirstate:
1403 1403 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1404 1404 rejected.append(f)
1405 1405 elif self._repo.dirstate[f] != 'a':
1406 1406 self._repo.dirstate.remove(f)
1407 1407 else:
1408 1408 self._repo.dirstate.drop(f)
1409 1409 return rejected
1410 1410 finally:
1411 1411 wlock.release()
1412 1412
1413 1413 def undelete(self, list):
1414 1414 pctxs = self.parents()
1415 1415 wlock = self._repo.wlock()
1416 1416 try:
1417 1417 for f in list:
1418 1418 if self._repo.dirstate[f] != 'r':
1419 1419 self._repo.ui.warn(_("%s not removed!\n") % f)
1420 1420 else:
1421 1421 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1422 1422 t = fctx.data()
1423 1423 self._repo.wwrite(f, t, fctx.flags())
1424 1424 self._repo.dirstate.normal(f)
1425 1425 finally:
1426 1426 wlock.release()
1427 1427
1428 1428 def copy(self, source, dest):
1429 1429 try:
1430 1430 st = self._repo.wvfs.lstat(dest)
1431 1431 except OSError, err:
1432 1432 if err.errno != errno.ENOENT:
1433 1433 raise
1434 1434 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1435 1435 return
1436 1436 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1437 1437 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1438 1438 "symbolic link\n") % dest)
1439 1439 else:
1440 1440 wlock = self._repo.wlock()
1441 1441 try:
1442 1442 if self._repo.dirstate[dest] in '?':
1443 1443 self._repo.dirstate.add(dest)
1444 1444 elif self._repo.dirstate[dest] in 'r':
1445 1445 self._repo.dirstate.normallookup(dest)
1446 1446 self._repo.dirstate.copy(source, dest)
1447 1447 finally:
1448 1448 wlock.release()
1449 1449
1450 1450 def match(self, pats=[], include=None, exclude=None, default='glob',
1451 listsubrepos=False):
1451 listsubrepos=False, badfn=None):
1452 1452 r = self._repo
1453 1453
1454 1454 # Only a case insensitive filesystem needs magic to translate user input
1455 1455 # to actual case in the filesystem.
1456 1456 if not util.checkcase(r.root):
1457 1457 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1458 1458 exclude, default, r.auditor, self,
1459 listsubrepos=listsubrepos)
1459 listsubrepos=listsubrepos,
1460 badfn=badfn)
1460 1461 return matchmod.match(r.root, r.getcwd(), pats,
1461 1462 include, exclude, default,
1462 1463 auditor=r.auditor, ctx=self,
1463 listsubrepos=listsubrepos)
1464 listsubrepos=listsubrepos, badfn=badfn)
1464 1465
1465 1466 def _filtersuspectsymlink(self, files):
1466 1467 if not files or self._repo.dirstate._checklink:
1467 1468 return files
1468 1469
1469 1470 # Symlink placeholders may get non-symlink-like contents
1470 1471 # via user error or dereferencing by NFS or Samba servers,
1471 1472 # so we filter out any placeholders that don't look like a
1472 1473 # symlink
1473 1474 sane = []
1474 1475 for f in files:
1475 1476 if self.flags(f) == 'l':
1476 1477 d = self[f].data()
1477 1478 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1478 1479 self._repo.ui.debug('ignoring suspect symlink placeholder'
1479 1480 ' "%s"\n' % f)
1480 1481 continue
1481 1482 sane.append(f)
1482 1483 return sane
1483 1484
1484 1485 def _checklookup(self, files):
1485 1486 # check for any possibly clean files
1486 1487 if not files:
1487 1488 return [], []
1488 1489
1489 1490 modified = []
1490 1491 fixup = []
1491 1492 pctx = self._parents[0]
1492 1493 # do a full compare of any files that might have changed
1493 1494 for f in sorted(files):
1494 1495 if (f not in pctx or self.flags(f) != pctx.flags(f)
1495 1496 or pctx[f].cmp(self[f])):
1496 1497 modified.append(f)
1497 1498 else:
1498 1499 fixup.append(f)
1499 1500
1500 1501 # update dirstate for files that are actually clean
1501 1502 if fixup:
1502 1503 try:
1503 1504 # updating the dirstate is optional
1504 1505 # so we don't wait on the lock
1505 1506 # wlock can invalidate the dirstate, so cache normal _after_
1506 1507 # taking the lock
1507 1508 wlock = self._repo.wlock(False)
1508 1509 normal = self._repo.dirstate.normal
1509 1510 try:
1510 1511 for f in fixup:
1511 1512 normal(f)
1512 1513 finally:
1513 1514 wlock.release()
1514 1515 except error.LockError:
1515 1516 pass
1516 1517 return modified, fixup
1517 1518
1518 1519 def _manifestmatches(self, match, s):
1519 1520 """Slow path for workingctx
1520 1521
1521 1522 The fast path is when we compare the working directory to its parent
1522 1523 which means this function is comparing with a non-parent; therefore we
1523 1524 need to build a manifest and return what matches.
1524 1525 """
1525 1526 mf = self._repo['.']._manifestmatches(match, s)
1526 1527 for f in s.modified + s.added:
1527 1528 mf[f] = _newnode
1528 1529 mf.setflag(f, self.flags(f))
1529 1530 for f in s.removed:
1530 1531 if f in mf:
1531 1532 del mf[f]
1532 1533 return mf
1533 1534
1534 1535 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1535 1536 unknown=False):
1536 1537 '''Gets the status from the dirstate -- internal use only.'''
1537 1538 listignored, listclean, listunknown = ignored, clean, unknown
1538 1539 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1539 1540 subrepos = []
1540 1541 if '.hgsub' in self:
1541 1542 subrepos = sorted(self.substate)
1542 1543 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1543 1544 listclean, listunknown)
1544 1545
1545 1546 # check for any possibly clean files
1546 1547 if cmp:
1547 1548 modified2, fixup = self._checklookup(cmp)
1548 1549 s.modified.extend(modified2)
1549 1550
1550 1551 # update dirstate for files that are actually clean
1551 1552 if fixup and listclean:
1552 1553 s.clean.extend(fixup)
1553 1554
1554 1555 if match.always():
1555 1556 # cache for performance
1556 1557 if s.unknown or s.ignored or s.clean:
1557 1558 # "_status" is cached with list*=False in the normal route
1558 1559 self._status = scmutil.status(s.modified, s.added, s.removed,
1559 1560 s.deleted, [], [], [])
1560 1561 else:
1561 1562 self._status = s
1562 1563
1563 1564 return s
1564 1565
1565 1566 def _buildstatus(self, other, s, match, listignored, listclean,
1566 1567 listunknown):
1567 1568 """build a status with respect to another context
1568 1569
1569 1570 This includes logic for maintaining the fast path of status when
1570 1571 comparing the working directory against its parent, which is to skip
1571 1572 building a new manifest if self (working directory) is not comparing
1572 1573 against its parent (repo['.']).
1573 1574 """
1574 1575 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1575 1576 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1576 1577 # might have accidentally ended up with the entire contents of the file
1577 1578 # they are supposed to be linking to.
1578 1579 s.modified[:] = self._filtersuspectsymlink(s.modified)
1579 1580 if other != self._repo['.']:
1580 1581 s = super(workingctx, self)._buildstatus(other, s, match,
1581 1582 listignored, listclean,
1582 1583 listunknown)
1583 1584 return s
1584 1585
1585 1586 def _matchstatus(self, other, match):
1586 1587 """override the match method with a filter for directory patterns
1587 1588
1588 1589 We use inheritance to customize the match.bad method only for
1589 1590 workingctx, because the override is needed only when the working
1590 1591 directory is compared against a non-parent changeset.
1591 1592 
1592 1593 If we are comparing against the working directory's parent (repo['.']),
1593 1594 we just use the default match object sent to us.
1594 1595 """
1595 1596 superself = super(workingctx, self)
1596 1597 match = superself._matchstatus(other, match)
1597 1598 if other != self._repo['.']:
1598 1599 def bad(f, msg):
1599 1600 # 'f' may be a directory pattern from 'match.files()',
1600 1601 # so 'f not in ctx1' is not enough
1601 1602 if f not in other and not other.hasdir(f):
1602 1603 self._repo.ui.warn('%s: %s\n' %
1603 1604 (self._repo.dirstate.pathto(f), msg))
1604 1605 match.bad = bad
1605 1606 return match
1606 1607
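# A minimal, illustrative sketch (not part of the original module): the same
# "assign a bad() callback" pattern used by _matchstatus() above works on any
# matcher built with matchmod.  The helper name below is hypothetical and the
# callback merely reports the problem and keeps going.
def _examplebadoverride(repo, patterns):
    m = matchmod.match(repo.root, repo.getcwd(), patterns)
    def bad(f, msg):
        # report the bad file but do not abort, mirroring the override above
        repo.ui.warn('%s: %s\n' % (f, msg))
    m.bad = bad
    return m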
1607 1608 class committablefilectx(basefilectx):
1608 1609 """A committablefilectx provides common functionality for a file context
1609 1610 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1610 1611 def __init__(self, repo, path, filelog=None, ctx=None):
1611 1612 self._repo = repo
1612 1613 self._path = path
1613 1614 self._changeid = None
1614 1615 self._filerev = self._filenode = None
1615 1616
1616 1617 if filelog is not None:
1617 1618 self._filelog = filelog
1618 1619 if ctx:
1619 1620 self._changectx = ctx
1620 1621
1621 1622 def __nonzero__(self):
1622 1623 return True
1623 1624
1624 1625 def linkrev(self):
1625 1626 # linked to self._changectx no matter if file is modified or not
1626 1627 return self.rev()
1627 1628
1628 1629 def parents(self):
1629 1630 '''return parent filectxs, following copies if necessary'''
1630 1631 def filenode(ctx, path):
1631 1632 return ctx._manifest.get(path, nullid)
1632 1633
1633 1634 path = self._path
1634 1635 fl = self._filelog
1635 1636 pcl = self._changectx._parents
1636 1637 renamed = self.renamed()
1637 1638
1638 1639 if renamed:
1639 1640 pl = [renamed + (None,)]
1640 1641 else:
1641 1642 pl = [(path, filenode(pcl[0], path), fl)]
1642 1643
1643 1644 for pc in pcl[1:]:
1644 1645 pl.append((path, filenode(pc, path), fl))
1645 1646
1646 1647 return [self._parentfilectx(p, fileid=n, filelog=l)
1647 1648 for p, n, l in pl if n != nullid]
1648 1649
1649 1650 def children(self):
1650 1651 return []
1651 1652
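# Editorial note (illustrative, not from the original file): because parents()
# above follows renames, a file 'b' recorded as a copy of 'a' reports the
# filectx of 'a' in the first parent as its file parent, rather than looking
# for an ancestor of 'b' that may not exist.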
1652 1653 class workingfilectx(committablefilectx):
1653 1654 """A workingfilectx object makes access to data related to a particular
1654 1655 file in the working directory convenient."""
1655 1656 def __init__(self, repo, path, filelog=None, workingctx=None):
1656 1657 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1657 1658
1658 1659 @propertycache
1659 1660 def _changectx(self):
1660 1661 return workingctx(self._repo)
1661 1662
1662 1663 def data(self):
1663 1664 return self._repo.wread(self._path)
1664 1665 def renamed(self):
1665 1666 rp = self._repo.dirstate.copied(self._path)
1666 1667 if not rp:
1667 1668 return None
1668 1669 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1669 1670
1670 1671 def size(self):
1671 1672 return self._repo.wvfs.lstat(self._path).st_size
1672 1673 def date(self):
1673 1674 t, tz = self._changectx.date()
1674 1675 try:
1675 1676 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1676 1677 except OSError, err:
1677 1678 if err.errno != errno.ENOENT:
1678 1679 raise
1679 1680 return (t, tz)
1680 1681
1681 1682 def cmp(self, fctx):
1682 1683 """compare with other file context
1683 1684
1684 1685 returns True if different from fctx.
1685 1686 """
1686 1687 # fctx should be a filectx (not a workingfilectx)
1687 1688 # invert comparison to reuse the same code path
1688 1689 return fctx.cmp(self)
1689 1690
1690 1691 def remove(self, ignoremissing=False):
1691 1692 """wraps unlink for a repo's working directory"""
1692 1693 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1693 1694
1694 1695 def write(self, data, flags):
1695 1696 """wraps repo.wwrite"""
1696 1697 self._repo.wwrite(self._path, data, flags)
1697 1698
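# Hedged usage sketch (the helper name and appended text are invented for
# illustration; they are not part of Mercurial).  A workingfilectx is normally
# reached through the working context, repo[None]:
def _exampleworkingfileappend(repo, path, text):
    wctx = repo[None]                  # workingctx for the working directory
    fctx = wctx[path]                  # workingfilectx defined above
    fctx.write(fctx.data() + text, fctx.flags())
    return fctx.size()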
1698 1699 class workingcommitctx(workingctx):
1699 1700 """A workingcommitctx object makes access to data related to
1700 1701 the revision being committed convenient.
1701 1702
1702 1703 This hides changes in the working directory, if they aren't
1703 1704 committed in this context.
1704 1705 """
1705 1706 def __init__(self, repo, changes,
1706 1707 text="", user=None, date=None, extra=None):
1707 1708 super(workingctx, self).__init__(repo, text, user, date, extra,
1708 1709 changes)
1709 1710
1710 1711 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1711 1712 unknown=False):
1712 1713 """Return matched files only in ``self._status``
1713 1714
1714 1715 Uncommitted files appear "clean" via this context, even if
1715 1716 they aren't actually so in the working directory.
1716 1717 """
1717 1718 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1718 1719 if clean:
1719 1720 clean = [f for f in self._manifest if f not in self._changedset]
1720 1721 else:
1721 1722 clean = []
1722 1723 return scmutil.status([f for f in self._status.modified if match(f)],
1723 1724 [f for f in self._status.added if match(f)],
1724 1725 [f for f in self._status.removed if match(f)],
1725 1726 [], [], [], clean)
1726 1727
1727 1728 @propertycache
1728 1729 def _changedset(self):
1729 1730 """Return the set of files changed in this context
1730 1731 """
1731 1732 changed = set(self._status.modified)
1732 1733 changed.update(self._status.added)
1733 1734 changed.update(self._status.removed)
1734 1735 return changed
1735 1736
1736 1737 class memctx(committablectx):
1737 1738 """Use memctx to perform in-memory commits via localrepo.commitctx().
1738 1739
1739 1740 Revision information is supplied at initialization time, while
1740 1741 related file data is made available through a callback
1741 1742 mechanism. 'repo' is the current localrepo, 'parents' is a
1742 1743 sequence of two parent revisions identifiers (pass None for every
1743 1744 missing parent), 'text' is the commit message and 'files' lists
1744 1745 names of files touched by the revision (normalized and relative to
1745 1746 repository root).
1746 1747
1747 1748 filectxfn(repo, memctx, path) is a callable receiving the
1748 1749 repository, the current memctx object and the normalized path of
1749 1750 requested file, relative to repository root. It is fired by the
1750 1751 commit function for every file in 'files', but the call order is
1751 1752 undefined. If the file is available in the revision being
1752 1753 committed (updated or added), filectxfn returns a memfilectx
1753 1754 object. If the file was removed, filectxfn raises an
1754 1755 IOError. Moved files are represented by marking the source file
1755 1756 removed and the new file added with copy information (see
1756 1757 memfilectx).
1757 1758
1758 1759 user receives the committer name and defaults to current
1759 1760 repository username, date is the commit date in any format
1760 1761 supported by util.parsedate() and defaults to current date, extra
1761 1762 is a dictionary of metadata or is left empty.
1762 1763 """
1763 1764
1764 1765 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1765 1766 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1766 1767 # this field to determine what to do in filectxfn.
1767 1768 _returnnoneformissingfiles = True
1768 1769
1769 1770 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1770 1771 date=None, extra=None, editor=False):
1771 1772 super(memctx, self).__init__(repo, text, user, date, extra)
1772 1773 self._rev = None
1773 1774 self._node = None
1774 1775 parents = [(p or nullid) for p in parents]
1775 1776 p1, p2 = parents
1776 1777 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1777 1778 files = sorted(set(files))
1778 1779 self._files = files
1779 1780 self.substate = {}
1780 1781
1781 1782 # if store is not callable, wrap it in a function
1782 1783 if not callable(filectxfn):
1783 1784 def getfilectx(repo, memctx, path):
1784 1785 fctx = filectxfn[path]
1785 1786 # this is weird but apparently we only keep track of one parent
1786 1787 # (why not only store that instead of a tuple?)
1787 1788 copied = fctx.renamed()
1788 1789 if copied:
1789 1790 copied = copied[0]
1790 1791 return memfilectx(repo, path, fctx.data(),
1791 1792 islink=fctx.islink(), isexec=fctx.isexec(),
1792 1793 copied=copied, memctx=memctx)
1793 1794 self._filectxfn = getfilectx
1794 1795 else:
1795 1796 # "util.cachefunc" reduces invocation of possibly expensive
1796 1797 # "filectxfn" for performance (e.g. converting from another VCS)
1797 1798 self._filectxfn = util.cachefunc(filectxfn)
1798 1799
1799 1800 if extra:
1800 1801 self._extra = extra.copy()
1801 1802 else:
1802 1803 self._extra = {}
1803 1804
1804 1805 if self._extra.get('branch', '') == '':
1805 1806 self._extra['branch'] = 'default'
1806 1807
1807 1808 if editor:
1808 1809 self._text = editor(self._repo, self, [])
1809 1810 self._repo.savecommitmessage(self._text)
1810 1811
1811 1812 def filectx(self, path, filelog=None):
1812 1813 """get a file context from the working directory
1813 1814
1814 1815 Returns None if file doesn't exist and should be removed."""
1815 1816 return self._filectxfn(self._repo, self, path)
1816 1817
1817 1818 def commit(self):
1818 1819 """commit context to the repo"""
1819 1820 return self._repo.commitctx(self)
1820 1821
1821 1822 @propertycache
1822 1823 def _manifest(self):
1823 1824 """generate a manifest based on the return values of filectxfn"""
1824 1825
1825 1826 # keep this simple for now; just worry about p1
1826 1827 pctx = self._parents[0]
1827 1828 man = pctx.manifest().copy()
1828 1829
1829 1830 for f in self._status.modified:
1830 1831 p1node = nullid
1831 1832 p2node = nullid
1832 1833 p = pctx[f].parents() # if file isn't in pctx, check p2?
1833 1834 if len(p) > 0:
1834 1835 p1node = p[0].node()
1835 1836 if len(p) > 1:
1836 1837 p2node = p[1].node()
1837 1838 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1838 1839
1839 1840 for f in self._status.added:
1840 1841 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1841 1842
1842 1843 for f in self._status.removed:
1843 1844 if f in man:
1844 1845 del man[f]
1845 1846
1846 1847 return man
1847 1848
1848 1849 @propertycache
1849 1850 def _status(self):
1850 1851 """Calculate exact status from ``files`` specified at construction
1851 1852 """
1852 1853 man1 = self.p1().manifest()
1853 1854 p2 = self._parents[1]
1854 1855 # "1 < len(self._parents)" can't be used for checking
1855 1856 # existence of the 2nd parent, because "memctx._parents" is
1856 1857 # explicitly initialized by the list, of which length is 2.
1857 1858 if p2.node() != nullid:
1858 1859 man2 = p2.manifest()
1859 1860 managing = lambda f: f in man1 or f in man2
1860 1861 else:
1861 1862 managing = lambda f: f in man1
1862 1863
1863 1864 modified, added, removed = [], [], []
1864 1865 for f in self._files:
1865 1866 if not managing(f):
1866 1867 added.append(f)
1867 1868 elif self[f]:
1868 1869 modified.append(f)
1869 1870 else:
1870 1871 removed.append(f)
1871 1872
1872 1873 return scmutil.status(modified, added, removed, [], [], [], [])
1873 1874
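# Minimal sketch of driving memctx from a script or extension (hedged: the
# helper name, file name and commit text are invented; 'repo' is assumed to be
# an existing localrepo).  It shows the filectxfn callback described in the
# memctx docstring above.
def _examplememcommit(repo):
    def getfilectx(repo, memctx, path):
        # called once per entry in 'files'; returning a memfilectx marks the
        # file as present (added or modified) in the new revision
        return memfilectx(repo, path, 'example contents\n', memctx=memctx)
    mctx = memctx(repo, [repo['.'].node(), None], 'example in-memory commit',
                  ['example.txt'], getfilectx, user='someone@example.com')
    return mctx.commit()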
1874 1875 class memfilectx(committablefilectx):
1875 1876 """memfilectx represents an in-memory file to commit.
1876 1877
1877 1878 See memctx and committablefilectx for more details.
1878 1879 """
1879 1880 def __init__(self, repo, path, data, islink=False,
1880 1881 isexec=False, copied=None, memctx=None):
1881 1882 """
1882 1883 path is the normalized file path relative to repository root.
1883 1884 data is the file content as a string.
1884 1885 islink is True if the file is a symbolic link.
1885 1886 isexec is True if the file is executable.
1886 1887 copied is the source file path if the current file was copied in the
1887 1888 revision being committed, or None."""
1888 1889 super(memfilectx, self).__init__(repo, path, None, memctx)
1889 1890 self._data = data
1890 1891 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1891 1892 self._copied = None
1892 1893 if copied:
1893 1894 self._copied = (copied, nullid)
1894 1895
1895 1896 def data(self):
1896 1897 return self._data
1897 1898 def size(self):
1898 1899 return len(self.data())
1899 1900 def flags(self):
1900 1901 return self._flags
1901 1902 def renamed(self):
1902 1903 return self._copied
1903 1904
1904 1905 def remove(self, ignoremissing=False):
1905 1906 """wraps unlink for a repo's working directory"""
1906 1907 # need to figure out what to do here
1907 1908 del self._changectx[self._path]
1908 1909
1909 1910 def write(self, data, flags):
1910 1911 """wraps repo.wwrite"""
1911 1912 self._data = data