context: introduce the nullsub() method...
Matt Harbison
r25417:95c27135 default
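The new method added here is a thin delegation: basectx.nullsub(path, pctx) simply returns subrepo.nullsubrepo(self, path, pctx), so every context type can hand out a placeholder subrepo object for a given path. A minimal usage sketch follows, assuming a loaded repo object and a subrepo path supplied by the caller; the variable names and the choice of passing ctx as pctx are illustrative only and are not taken from this changeset:

    ctx = repo['.']                   # any basectx subclass exposes nullsub()
    sub = ctx.nullsub('mysub', ctx)   # delegates to subrepo.nullsubrepo(ctx, 'mysub', ctx)
    # 'sub' presumably stands in for a subrepo that is not actually present
    # in this context, e.g. when comparing against a revision where the
    # subrepo has not been added yet.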
@@ -1,1910 +1,1913 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, short, hex, bin
9 9 from i18n import _
10 10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 11 import match as matchmod
12 12 import copy, os, errno, stat
13 13 import obsolete as obsmod
14 14 import repoview
15 15 import fileset
16 16 import revlog
17 17
18 18 propertycache = util.propertycache
19 19
20 20 # Phony node value to stand-in for new files in some uses of
21 21 # manifests. Manifests support 21-byte hashes for nodes which are
22 22 # dirty in the working copy.
23 23 _newnode = '!' * 21
24 24
25 25 class basectx(object):
26 26 """A basectx object represents the common logic for its children:
27 27 changectx: read-only context that is already present in the repo,
28 28 workingctx: a context that represents the working directory and can
29 29 be committed,
30 30 memctx: a context that represents changes in-memory and can also
31 31 be committed."""
32 32 def __new__(cls, repo, changeid='', *args, **kwargs):
33 33 if isinstance(changeid, basectx):
34 34 return changeid
35 35
36 36 o = super(basectx, cls).__new__(cls)
37 37
38 38 o._repo = repo
39 39 o._rev = nullrev
40 40 o._node = nullid
41 41
42 42 return o
43 43
44 44 def __str__(self):
45 45 return short(self.node())
46 46
47 47 def __int__(self):
48 48 return self.rev()
49 49
50 50 def __repr__(self):
51 51 return "<%s %s>" % (type(self).__name__, str(self))
52 52
53 53 def __eq__(self, other):
54 54 try:
55 55 return type(self) == type(other) and self._rev == other._rev
56 56 except AttributeError:
57 57 return False
58 58
59 59 def __ne__(self, other):
60 60 return not (self == other)
61 61
62 62 def __contains__(self, key):
63 63 return key in self._manifest
64 64
65 65 def __getitem__(self, key):
66 66 return self.filectx(key)
67 67
68 68 def __iter__(self):
69 69 return iter(self._manifest)
70 70
71 71 def _manifestmatches(self, match, s):
72 72 """generate a new manifest filtered by the match argument
73 73
74 74 This method is for internal use only and mainly exists to provide an
75 75 object oriented way for other contexts to customize the manifest
76 76 generation.
77 77 """
78 78 return self.manifest().matches(match)
79 79
80 80 def _matchstatus(self, other, match):
81 81         """return match.always if match is None
82 82
83 83 This internal method provides a way for child objects to override the
84 84 match operator.
85 85 """
86 86 return match or matchmod.always(self._repo.root, self._repo.getcwd())
87 87
88 88 def _buildstatus(self, other, s, match, listignored, listclean,
89 89 listunknown):
90 90 """build a status with respect to another context"""
91 91 # Load earliest manifest first for caching reasons. More specifically,
92 92 # if you have revisions 1000 and 1001, 1001 is probably stored as a
93 93 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
94 94 # 1000 and cache it so that when you read 1001, we just need to apply a
95 95 # delta to what's in the cache. So that's one full reconstruction + one
96 96 # delta application.
97 97 if self.rev() is not None and self.rev() < other.rev():
98 98 self.manifest()
99 99 mf1 = other._manifestmatches(match, s)
100 100 mf2 = self._manifestmatches(match, s)
101 101
102 102 modified, added = [], []
103 103 removed = []
104 104 clean = []
105 105 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
106 106 deletedset = set(deleted)
107 107 d = mf1.diff(mf2, clean=listclean)
108 108 for fn, value in d.iteritems():
109 109 if fn in deletedset:
110 110 continue
111 111 if value is None:
112 112 clean.append(fn)
113 113 continue
114 114 (node1, flag1), (node2, flag2) = value
115 115 if node1 is None:
116 116 added.append(fn)
117 117 elif node2 is None:
118 118 removed.append(fn)
119 119 elif node2 != _newnode:
120 120 # The file was not a new file in mf2, so an entry
121 121 # from diff is really a difference.
122 122 modified.append(fn)
123 123 elif self[fn].cmp(other[fn]):
124 124 # node2 was newnode, but the working file doesn't
125 125 # match the one in mf1.
126 126 modified.append(fn)
127 127 else:
128 128 clean.append(fn)
129 129
130 130 if removed:
131 131 # need to filter files if they are already reported as removed
132 132 unknown = [fn for fn in unknown if fn not in mf1]
133 133 ignored = [fn for fn in ignored if fn not in mf1]
134 134 # if they're deleted, don't report them as removed
135 135 removed = [fn for fn in removed if fn not in deletedset]
136 136
137 137 return scmutil.status(modified, added, removed, deleted, unknown,
138 138 ignored, clean)
139 139
140 140 @propertycache
141 141 def substate(self):
142 142 return subrepo.state(self, self._repo.ui)
143 143
144 144 def subrev(self, subpath):
145 145 return self.substate[subpath][1]
146 146
147 147 def rev(self):
148 148 return self._rev
149 149 def node(self):
150 150 return self._node
151 151 def hex(self):
152 152 return hex(self.node())
153 153 def manifest(self):
154 154 return self._manifest
155 155 def repo(self):
156 156 return self._repo
157 157 def phasestr(self):
158 158 return phases.phasenames[self.phase()]
159 159 def mutable(self):
160 160 return self.phase() > phases.public
161 161
162 162 def getfileset(self, expr):
163 163 return fileset.getfileset(self, expr)
164 164
165 165 def obsolete(self):
166 166 """True if the changeset is obsolete"""
167 167 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
168 168
169 169 def extinct(self):
170 170 """True if the changeset is extinct"""
171 171 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
172 172
173 173 def unstable(self):
174 174         """True if the changeset is not obsolete but its ancestors are"""
175 175 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
176 176
177 177 def bumped(self):
178 178         """True if the changeset tries to be a successor of a public changeset
179 179
180 180 Only non-public and non-obsolete changesets may be bumped.
181 181 """
182 182 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
183 183
184 184 def divergent(self):
185 185         """Is a successor of a changeset with multiple possible successors set
186 186
187 187 Only non-public and non-obsolete changesets may be divergent.
188 188 """
189 189 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
190 190
191 191 def troubled(self):
192 192 """True if the changeset is either unstable, bumped or divergent"""
193 193 return self.unstable() or self.bumped() or self.divergent()
194 194
195 195 def troubles(self):
196 196         """return the list of troubles affecting this changeset.
197 197
198 198         Troubles are returned as strings. Possible values are:
199 199 - unstable,
200 200 - bumped,
201 201 - divergent.
202 202 """
203 203 troubles = []
204 204 if self.unstable():
205 205 troubles.append('unstable')
206 206 if self.bumped():
207 207 troubles.append('bumped')
208 208 if self.divergent():
209 209 troubles.append('divergent')
210 210 return troubles
211 211
212 212 def parents(self):
213 213 """return contexts for each parent changeset"""
214 214 return self._parents
215 215
216 216 def p1(self):
217 217 return self._parents[0]
218 218
219 219 def p2(self):
220 220 if len(self._parents) == 2:
221 221 return self._parents[1]
222 222 return changectx(self._repo, -1)
223 223
224 224 def _fileinfo(self, path):
225 225 if '_manifest' in self.__dict__:
226 226 try:
227 227 return self._manifest[path], self._manifest.flags(path)
228 228 except KeyError:
229 229 raise error.ManifestLookupError(self._node, path,
230 230 _('not found in manifest'))
231 231 if '_manifestdelta' in self.__dict__ or path in self.files():
232 232 if path in self._manifestdelta:
233 233 return (self._manifestdelta[path],
234 234 self._manifestdelta.flags(path))
235 235 node, flag = self._repo.manifest.find(self._changeset[0], path)
236 236 if not node:
237 237 raise error.ManifestLookupError(self._node, path,
238 238 _('not found in manifest'))
239 239
240 240 return node, flag
241 241
242 242 def filenode(self, path):
243 243 return self._fileinfo(path)[0]
244 244
245 245 def flags(self, path):
246 246 try:
247 247 return self._fileinfo(path)[1]
248 248 except error.LookupError:
249 249 return ''
250 250
251 251 def sub(self, path):
252 252 return subrepo.subrepo(self, path)
253 253
254 def nullsub(self, path, pctx):
255 return subrepo.nullsubrepo(self, path, pctx)
256
254 257 def match(self, pats=[], include=None, exclude=None, default='glob',
255 258 listsubrepos=False):
256 259 r = self._repo
257 260 return matchmod.match(r.root, r.getcwd(), pats,
258 261 include, exclude, default,
259 262 auditor=r.auditor, ctx=self,
260 263 listsubrepos=listsubrepos)
261 264
262 265 def diff(self, ctx2=None, match=None, **opts):
263 266 """Returns a diff generator for the given contexts and matcher"""
264 267 if ctx2 is None:
265 268 ctx2 = self.p1()
266 269 if ctx2 is not None:
267 270 ctx2 = self._repo[ctx2]
268 271 diffopts = patch.diffopts(self._repo.ui, opts)
269 272 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
270 273
271 274 def dirs(self):
272 275 return self._manifest.dirs()
273 276
274 277 def hasdir(self, dir):
275 278 return self._manifest.hasdir(dir)
276 279
277 280 def dirty(self, missing=False, merge=True, branch=True):
278 281 return False
279 282
280 283 def status(self, other=None, match=None, listignored=False,
281 284 listclean=False, listunknown=False, listsubrepos=False):
282 285 """return status of files between two nodes or node and working
283 286 directory.
284 287
285 288 If other is None, compare this node with working directory.
286 289
287 290 returns (modified, added, removed, deleted, unknown, ignored, clean)
288 291 """
289 292
290 293 ctx1 = self
291 294 ctx2 = self._repo[other]
292 295
293 296 # This next code block is, admittedly, fragile logic that tests for
294 297 # reversing the contexts and wouldn't need to exist if it weren't for
295 298 # the fast (and common) code path of comparing the working directory
296 299 # with its first parent.
297 300 #
298 301 # What we're aiming for here is the ability to call:
299 302 #
300 303 # workingctx.status(parentctx)
301 304 #
302 305 # If we always built the manifest for each context and compared those,
303 306 # then we'd be done. But the special case of the above call means we
304 307 # just copy the manifest of the parent.
305 308 reversed = False
306 309 if (not isinstance(ctx1, changectx)
307 310 and isinstance(ctx2, changectx)):
308 311 reversed = True
309 312 ctx1, ctx2 = ctx2, ctx1
310 313
311 314 match = ctx2._matchstatus(ctx1, match)
312 315 r = scmutil.status([], [], [], [], [], [], [])
313 316 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
314 317 listunknown)
315 318
316 319 if reversed:
317 320 # Reverse added and removed. Clear deleted, unknown and ignored as
318 321 # these make no sense to reverse.
319 322 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
320 323 r.clean)
321 324
322 325 if listsubrepos:
323 326 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
324 327 rev2 = ctx2.subrev(subpath)
325 328 try:
326 329 submatch = matchmod.narrowmatcher(subpath, match)
327 330 s = sub.status(rev2, match=submatch, ignored=listignored,
328 331 clean=listclean, unknown=listunknown,
329 332 listsubrepos=True)
330 333 for rfiles, sfiles in zip(r, s):
331 334 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
332 335 except error.LookupError:
333 336 self._repo.ui.status(_("skipping missing "
334 337 "subrepository: %s\n") % subpath)
335 338
336 339 for l in r:
337 340 l.sort()
338 341
339 342 return r
340 343
341 344
342 345 def makememctx(repo, parents, text, user, date, branch, files, store,
343 346 editor=None, extra=None):
344 347 def getfilectx(repo, memctx, path):
345 348 data, mode, copied = store.getfile(path)
346 349 if data is None:
347 350 return None
348 351 islink, isexec = mode
349 352 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
350 353 copied=copied, memctx=memctx)
351 354 if extra is None:
352 355 extra = {}
353 356 if branch:
354 357 extra['branch'] = encoding.fromlocal(branch)
355 358 ctx = memctx(repo, parents, text, files, getfilectx, user,
356 359 date, extra, editor)
357 360 return ctx
358 361
359 362 class changectx(basectx):
360 363 """A changecontext object makes access to data related to a particular
361 364 changeset convenient. It represents a read-only context already present in
362 365 the repo."""
363 366 def __init__(self, repo, changeid=''):
364 367 """changeid is a revision number, node, or tag"""
365 368
366 369 # since basectx.__new__ already took care of copying the object, we
367 370 # don't need to do anything in __init__, so we just exit here
368 371 if isinstance(changeid, basectx):
369 372 return
370 373
371 374 if changeid == '':
372 375 changeid = '.'
373 376 self._repo = repo
374 377
375 378 try:
376 379 if isinstance(changeid, int):
377 380 self._node = repo.changelog.node(changeid)
378 381 self._rev = changeid
379 382 return
380 383 if isinstance(changeid, long):
381 384 changeid = str(changeid)
382 385 if changeid == 'null':
383 386 self._node = nullid
384 387 self._rev = nullrev
385 388 return
386 389 if changeid == 'tip':
387 390 self._node = repo.changelog.tip()
388 391 self._rev = repo.changelog.rev(self._node)
389 392 return
390 393 if changeid == '.' or changeid == repo.dirstate.p1():
391 394 # this is a hack to delay/avoid loading obsmarkers
392 395 # when we know that '.' won't be hidden
393 396 self._node = repo.dirstate.p1()
394 397 self._rev = repo.unfiltered().changelog.rev(self._node)
395 398 return
396 399 if len(changeid) == 20:
397 400 try:
398 401 self._node = changeid
399 402 self._rev = repo.changelog.rev(changeid)
400 403 return
401 404 except error.FilteredRepoLookupError:
402 405 raise
403 406 except LookupError:
404 407 pass
405 408
406 409 try:
407 410 r = int(changeid)
408 411 if str(r) != changeid:
409 412 raise ValueError
410 413 l = len(repo.changelog)
411 414 if r < 0:
412 415 r += l
413 416 if r < 0 or r >= l:
414 417 raise ValueError
415 418 self._rev = r
416 419 self._node = repo.changelog.node(r)
417 420 return
418 421 except error.FilteredIndexError:
419 422 raise
420 423 except (ValueError, OverflowError, IndexError):
421 424 pass
422 425
423 426 if len(changeid) == 40:
424 427 try:
425 428 self._node = bin(changeid)
426 429 self._rev = repo.changelog.rev(self._node)
427 430 return
428 431 except error.FilteredLookupError:
429 432 raise
430 433 except (TypeError, LookupError):
431 434 pass
432 435
433 436 # lookup bookmarks through the name interface
434 437 try:
435 438 self._node = repo.names.singlenode(repo, changeid)
436 439 self._rev = repo.changelog.rev(self._node)
437 440 return
438 441 except KeyError:
439 442 pass
440 443 except error.FilteredRepoLookupError:
441 444 raise
442 445 except error.RepoLookupError:
443 446 pass
444 447
445 448 self._node = repo.unfiltered().changelog._partialmatch(changeid)
446 449 if self._node is not None:
447 450 self._rev = repo.changelog.rev(self._node)
448 451 return
449 452
450 453 # lookup failed
451 454 # check if it might have come from damaged dirstate
452 455 #
453 456 # XXX we could avoid the unfiltered if we had a recognizable
454 457 # exception for filtered changeset access
455 458 if changeid in repo.unfiltered().dirstate.parents():
456 459 msg = _("working directory has unknown parent '%s'!")
457 460 raise error.Abort(msg % short(changeid))
458 461 try:
459 462 if len(changeid) == 20:
460 463 changeid = hex(changeid)
461 464 except TypeError:
462 465 pass
463 466 except (error.FilteredIndexError, error.FilteredLookupError,
464 467 error.FilteredRepoLookupError):
465 468 if repo.filtername.startswith('visible'):
466 469 msg = _("hidden revision '%s'") % changeid
467 470 hint = _('use --hidden to access hidden revisions')
468 471 raise error.FilteredRepoLookupError(msg, hint=hint)
469 472 msg = _("filtered revision '%s' (not in '%s' subset)")
470 473 msg %= (changeid, repo.filtername)
471 474 raise error.FilteredRepoLookupError(msg)
472 475 except IndexError:
473 476 pass
474 477 raise error.RepoLookupError(
475 478 _("unknown revision '%s'") % changeid)
476 479
477 480 def __hash__(self):
478 481 try:
479 482 return hash(self._rev)
480 483 except AttributeError:
481 484 return id(self)
482 485
483 486 def __nonzero__(self):
484 487 return self._rev != nullrev
485 488
486 489 @propertycache
487 490 def _changeset(self):
488 491 return self._repo.changelog.read(self.rev())
489 492
490 493 @propertycache
491 494 def _manifest(self):
492 495 return self._repo.manifest.read(self._changeset[0])
493 496
494 497 @propertycache
495 498 def _manifestdelta(self):
496 499 return self._repo.manifest.readdelta(self._changeset[0])
497 500
498 501 @propertycache
499 502 def _parents(self):
500 503 p = self._repo.changelog.parentrevs(self._rev)
501 504 if p[1] == nullrev:
502 505 p = p[:-1]
503 506 return [changectx(self._repo, x) for x in p]
504 507
505 508 def changeset(self):
506 509 return self._changeset
507 510 def manifestnode(self):
508 511 return self._changeset[0]
509 512
510 513 def user(self):
511 514 return self._changeset[1]
512 515 def date(self):
513 516 return self._changeset[2]
514 517 def files(self):
515 518 return self._changeset[3]
516 519 def description(self):
517 520 return self._changeset[4]
518 521 def branch(self):
519 522 return encoding.tolocal(self._changeset[5].get("branch"))
520 523 def closesbranch(self):
521 524 return 'close' in self._changeset[5]
522 525 def extra(self):
523 526 return self._changeset[5]
524 527 def tags(self):
525 528 return self._repo.nodetags(self._node)
526 529 def bookmarks(self):
527 530 return self._repo.nodebookmarks(self._node)
528 531 def phase(self):
529 532 return self._repo._phasecache.phase(self._repo, self._rev)
530 533 def hidden(self):
531 534 return self._rev in repoview.filterrevs(self._repo, 'visible')
532 535
533 536 def children(self):
534 537 """return contexts for each child changeset"""
535 538 c = self._repo.changelog.children(self._node)
536 539 return [changectx(self._repo, x) for x in c]
537 540
538 541 def ancestors(self):
539 542 for a in self._repo.changelog.ancestors([self._rev]):
540 543 yield changectx(self._repo, a)
541 544
542 545 def descendants(self):
543 546 for d in self._repo.changelog.descendants([self._rev]):
544 547 yield changectx(self._repo, d)
545 548
546 549 def filectx(self, path, fileid=None, filelog=None):
547 550 """get a file context from this changeset"""
548 551 if fileid is None:
549 552 fileid = self.filenode(path)
550 553 return filectx(self._repo, path, fileid=fileid,
551 554 changectx=self, filelog=filelog)
552 555
553 556 def ancestor(self, c2, warn=False):
554 557 """return the "best" ancestor context of self and c2
555 558
556 559 If there are multiple candidates, it will show a message and check
557 560 merge.preferancestor configuration before falling back to the
558 561 revlog ancestor."""
559 562 # deal with workingctxs
560 563 n2 = c2._node
561 564 if n2 is None:
562 565 n2 = c2._parents[0]._node
563 566 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
564 567 if not cahs:
565 568 anc = nullid
566 569 elif len(cahs) == 1:
567 570 anc = cahs[0]
568 571 else:
569 572 for r in self._repo.ui.configlist('merge', 'preferancestor'):
570 573 try:
571 574 ctx = changectx(self._repo, r)
572 575 except error.RepoLookupError:
573 576 continue
574 577 anc = ctx.node()
575 578 if anc in cahs:
576 579 break
577 580 else:
578 581 anc = self._repo.changelog.ancestor(self._node, n2)
579 582 if warn:
580 583 self._repo.ui.status(
581 584 (_("note: using %s as ancestor of %s and %s\n") %
582 585 (short(anc), short(self._node), short(n2))) +
583 586 ''.join(_(" alternatively, use --config "
584 587 "merge.preferancestor=%s\n") %
585 588 short(n) for n in sorted(cahs) if n != anc))
586 589 return changectx(self._repo, anc)
587 590
588 591 def descendant(self, other):
589 592 """True if other is descendant of this changeset"""
590 593 return self._repo.changelog.descendant(self._rev, other._rev)
591 594
592 595 def walk(self, match):
593 596 '''Generates matching file names.'''
594 597
595 598 # Override match.bad method to have message with nodeid
596 599 match = copy.copy(match)
597 600 oldbad = match.bad
598 601 def bad(fn, msg):
599 602 # The manifest doesn't know about subrepos, so don't complain about
600 603 # paths into valid subrepos.
601 604 if any(fn == s or fn.startswith(s + '/')
602 605 for s in self.substate):
603 606 return
604 607 oldbad(fn, _('no such file in rev %s') % self)
605 608 match.bad = bad
606 609
607 610 return self._manifest.walk(match)
608 611
609 612 def matches(self, match):
610 613 return self.walk(match)
611 614
612 615 class basefilectx(object):
613 616 """A filecontext object represents the common logic for its children:
614 617 filectx: read-only access to a filerevision that is already present
615 618 in the repo,
616 619 workingfilectx: a filecontext that represents files from the working
617 620 directory,
618 621 memfilectx: a filecontext that represents files in-memory."""
619 622 def __new__(cls, repo, path, *args, **kwargs):
620 623 return super(basefilectx, cls).__new__(cls)
621 624
622 625 @propertycache
623 626 def _filelog(self):
624 627 return self._repo.file(self._path)
625 628
626 629 @propertycache
627 630 def _changeid(self):
628 631 if '_changeid' in self.__dict__:
629 632 return self._changeid
630 633 elif '_changectx' in self.__dict__:
631 634 return self._changectx.rev()
632 635 elif '_descendantrev' in self.__dict__:
633 636 # this file context was created from a revision with a known
634 637 # descendant, we can (lazily) correct for linkrev aliases
635 638 return self._adjustlinkrev(self._path, self._filelog,
636 639 self._filenode, self._descendantrev)
637 640 else:
638 641 return self._filelog.linkrev(self._filerev)
639 642
640 643 @propertycache
641 644 def _filenode(self):
642 645 if '_fileid' in self.__dict__:
643 646 return self._filelog.lookup(self._fileid)
644 647 else:
645 648 return self._changectx.filenode(self._path)
646 649
647 650 @propertycache
648 651 def _filerev(self):
649 652 return self._filelog.rev(self._filenode)
650 653
651 654 @propertycache
652 655 def _repopath(self):
653 656 return self._path
654 657
655 658 def __nonzero__(self):
656 659 try:
657 660 self._filenode
658 661 return True
659 662 except error.LookupError:
660 663 # file is missing
661 664 return False
662 665
663 666 def __str__(self):
664 667 return "%s@%s" % (self.path(), self._changectx)
665 668
666 669 def __repr__(self):
667 670 return "<%s %s>" % (type(self).__name__, str(self))
668 671
669 672 def __hash__(self):
670 673 try:
671 674 return hash((self._path, self._filenode))
672 675 except AttributeError:
673 676 return id(self)
674 677
675 678 def __eq__(self, other):
676 679 try:
677 680 return (type(self) == type(other) and self._path == other._path
678 681 and self._filenode == other._filenode)
679 682 except AttributeError:
680 683 return False
681 684
682 685 def __ne__(self, other):
683 686 return not (self == other)
684 687
685 688 def filerev(self):
686 689 return self._filerev
687 690 def filenode(self):
688 691 return self._filenode
689 692 def flags(self):
690 693 return self._changectx.flags(self._path)
691 694 def filelog(self):
692 695 return self._filelog
693 696 def rev(self):
694 697 return self._changeid
695 698 def linkrev(self):
696 699 return self._filelog.linkrev(self._filerev)
697 700 def node(self):
698 701 return self._changectx.node()
699 702 def hex(self):
700 703 return self._changectx.hex()
701 704 def user(self):
702 705 return self._changectx.user()
703 706 def date(self):
704 707 return self._changectx.date()
705 708 def files(self):
706 709 return self._changectx.files()
707 710 def description(self):
708 711 return self._changectx.description()
709 712 def branch(self):
710 713 return self._changectx.branch()
711 714 def extra(self):
712 715 return self._changectx.extra()
713 716 def phase(self):
714 717 return self._changectx.phase()
715 718 def phasestr(self):
716 719 return self._changectx.phasestr()
717 720 def manifest(self):
718 721 return self._changectx.manifest()
719 722 def changectx(self):
720 723 return self._changectx
721 724 def repo(self):
722 725 return self._repo
723 726
724 727 def path(self):
725 728 return self._path
726 729
727 730 def isbinary(self):
728 731 try:
729 732 return util.binary(self.data())
730 733 except IOError:
731 734 return False
732 735 def isexec(self):
733 736 return 'x' in self.flags()
734 737 def islink(self):
735 738 return 'l' in self.flags()
736 739
737 740 def cmp(self, fctx):
738 741 """compare with other file context
739 742
740 743 returns True if different than fctx.
741 744 """
742 745 if (fctx._filerev is None
743 746 and (self._repo._encodefilterpats
744 747 # if file data starts with '\1\n', empty metadata block is
745 748 # prepended, which adds 4 bytes to filelog.size().
746 749 or self.size() - 4 == fctx.size())
747 750 or self.size() == fctx.size()):
748 751 return self._filelog.cmp(self._filenode, fctx.data())
749 752
750 753 return True
751 754
752 755 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
753 756 """return the first ancestor of <srcrev> introducing <fnode>
754 757
755 758 If the linkrev of the file revision does not point to an ancestor of
756 759 srcrev, we'll walk down the ancestors until we find one introducing
757 760 this file revision.
758 761
759 762 :repo: a localrepository object (used to access changelog and manifest)
760 763 :path: the file path
761 764 :fnode: the nodeid of the file revision
762 765 :filelog: the filelog of this path
763 766 :srcrev: the changeset revision we search ancestors from
764 767 :inclusive: if true, the src revision will also be checked
765 768 """
766 769 repo = self._repo
767 770 cl = repo.unfiltered().changelog
768 771 ma = repo.manifest
769 772 # fetch the linkrev
770 773 fr = filelog.rev(fnode)
771 774 lkr = filelog.linkrev(fr)
772 775 # hack to reuse ancestor computation when searching for renames
773 776 memberanc = getattr(self, '_ancestrycontext', None)
774 777 iteranc = None
775 778 if srcrev is None:
776 779 # wctx case, used by workingfilectx during mergecopy
777 780 revs = [p.rev() for p in self._repo[None].parents()]
778 781 inclusive = True # we skipped the real (revless) source
779 782 else:
780 783 revs = [srcrev]
781 784 if memberanc is None:
782 785 memberanc = iteranc = cl.ancestors(revs, lkr,
783 786 inclusive=inclusive)
784 787 # check if this linkrev is an ancestor of srcrev
785 788 if lkr not in memberanc:
786 789 if iteranc is None:
787 790 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
788 791 for a in iteranc:
789 792 ac = cl.read(a) # get changeset data (we avoid object creation)
790 793 if path in ac[3]: # checking the 'files' field.
791 794 # The file has been touched, check if the content is
792 795 # similar to the one we search for.
793 796 if fnode == ma.readfast(ac[0]).get(path):
794 797 return a
795 798 # In theory, we should never get out of that loop without a result.
796 799             # But if the manifest uses a buggy file revision (not a child of
797 800             # the one it replaces) we could. Such a buggy situation will likely
798 801             # result in a crash somewhere else at some point.
799 802 return lkr
800 803
801 804 def introrev(self):
802 805 """return the rev of the changeset which introduced this file revision
803 806
804 807         This method is different from linkrev because it takes into account the
805 808 changeset the filectx was created from. It ensures the returned
806 809 revision is one of its ancestors. This prevents bugs from
807 810 'linkrev-shadowing' when a file revision is used by multiple
808 811 changesets.
809 812 """
810 813 lkr = self.linkrev()
811 814 attrs = vars(self)
812 815 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
813 816 if noctx or self.rev() == lkr:
814 817 return self.linkrev()
815 818 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
816 819 self.rev(), inclusive=True)
817 820
818 821 def _parentfilectx(self, path, fileid, filelog):
819 822 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
820 823 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
821 824 if '_changeid' in vars(self) or '_changectx' in vars(self):
822 825 # If self is associated with a changeset (probably explicitly
823 826 # fed), ensure the created filectx is associated with a
824 827 # changeset that is an ancestor of self.changectx.
825 828 # This lets us later use _adjustlinkrev to get a correct link.
826 829 fctx._descendantrev = self.rev()
827 830 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
828 831 elif '_descendantrev' in vars(self):
829 832 # Otherwise propagate _descendantrev if we have one associated.
830 833 fctx._descendantrev = self._descendantrev
831 834 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
832 835 return fctx
833 836
834 837 def parents(self):
835 838 _path = self._path
836 839 fl = self._filelog
837 840 parents = self._filelog.parents(self._filenode)
838 841 pl = [(_path, node, fl) for node in parents if node != nullid]
839 842
840 843 r = fl.renamed(self._filenode)
841 844 if r:
842 845             # - In the simple rename case, both parents are nullid, pl is empty.
843 846             # - In case of merge, only one of the parents is nullid and should
844 847             # be replaced with the rename information. This parent is -always-
845 848             # the first one.
846 849             #
847 850             # As nullid parents have always been filtered out in the previous
848 851             # list comprehension, inserting at 0 will always result in replacing
849 852             # the first nullid parent with the rename information.
850 853 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
851 854
852 855 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
853 856
854 857 def p1(self):
855 858 return self.parents()[0]
856 859
857 860 def p2(self):
858 861 p = self.parents()
859 862 if len(p) == 2:
860 863 return p[1]
861 864 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
862 865
863 866 def annotate(self, follow=False, linenumber=None, diffopts=None):
864 867 '''returns a list of tuples of (ctx, line) for each line
865 868 in the file, where ctx is the filectx of the node where
866 869 that line was last changed.
867 870 This returns tuples of ((ctx, linenumber), line) for each line,
868 871         if the "linenumber" parameter is not "None".
869 872         In such tuples, linenumber means the line number at the first
870 873         appearance in the managed file.
871 874         To reduce annotation cost,
872 875         this returns a fixed value (False is used) as linenumber,
873 876         if the "linenumber" parameter is "False".'''
874 877
875 878 if linenumber is None:
876 879 def decorate(text, rev):
877 880 return ([rev] * len(text.splitlines()), text)
878 881 elif linenumber:
879 882 def decorate(text, rev):
880 883 size = len(text.splitlines())
881 884 return ([(rev, i) for i in xrange(1, size + 1)], text)
882 885 else:
883 886 def decorate(text, rev):
884 887 return ([(rev, False)] * len(text.splitlines()), text)
885 888
886 889 def pair(parent, child):
887 890 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
888 891 refine=True)
889 892 for (a1, a2, b1, b2), t in blocks:
890 893 # Changed blocks ('!') or blocks made only of blank lines ('~')
891 894 # belong to the child.
892 895 if t == '=':
893 896 child[0][b1:b2] = parent[0][a1:a2]
894 897 return child
895 898
896 899 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
897 900
898 901 def parents(f):
899 902 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
900 903 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
901 904 # from the topmost introrev (= srcrev) down to p.linkrev() if it
902 905 # isn't an ancestor of the srcrev.
903 906 f._changeid
904 907 pl = f.parents()
905 908
906 909 # Don't return renamed parents if we aren't following.
907 910 if not follow:
908 911 pl = [p for p in pl if p.path() == f.path()]
909 912
910 913 # renamed filectx won't have a filelog yet, so set it
911 914 # from the cache to save time
912 915 for p in pl:
913 916 if not '_filelog' in p.__dict__:
914 917 p._filelog = getlog(p.path())
915 918
916 919 return pl
917 920
918 921 # use linkrev to find the first changeset where self appeared
919 922 base = self
920 923 introrev = self.introrev()
921 924 if self.rev() != introrev:
922 925 base = self.filectx(self.filenode(), changeid=introrev)
923 926 if getattr(base, '_ancestrycontext', None) is None:
924 927 cl = self._repo.changelog
925 928 if introrev is None:
926 929 # wctx is not inclusive, but works because _ancestrycontext
927 930 # is used to test filelog revisions
928 931 ac = cl.ancestors([p.rev() for p in base.parents()],
929 932 inclusive=True)
930 933 else:
931 934 ac = cl.ancestors([introrev], inclusive=True)
932 935 base._ancestrycontext = ac
933 936
934 937 # This algorithm would prefer to be recursive, but Python is a
935 938 # bit recursion-hostile. Instead we do an iterative
936 939 # depth-first search.
937 940
938 941 visit = [base]
939 942 hist = {}
940 943 pcache = {}
941 944 needed = {base: 1}
942 945 while visit:
943 946 f = visit[-1]
944 947 pcached = f in pcache
945 948 if not pcached:
946 949 pcache[f] = parents(f)
947 950
948 951 ready = True
949 952 pl = pcache[f]
950 953 for p in pl:
951 954 if p not in hist:
952 955 ready = False
953 956 visit.append(p)
954 957 if not pcached:
955 958 needed[p] = needed.get(p, 0) + 1
956 959 if ready:
957 960 visit.pop()
958 961 reusable = f in hist
959 962 if reusable:
960 963 curr = hist[f]
961 964 else:
962 965 curr = decorate(f.data(), f)
963 966 for p in pl:
964 967 if not reusable:
965 968 curr = pair(hist[p], curr)
966 969 if needed[p] == 1:
967 970 del hist[p]
968 971 del needed[p]
969 972 else:
970 973 needed[p] -= 1
971 974
972 975 hist[f] = curr
973 976 pcache[f] = []
974 977
975 978 return zip(hist[base][0], hist[base][1].splitlines(True))
976 979
977 980 def ancestors(self, followfirst=False):
978 981 visit = {}
979 982 c = self
980 983 if followfirst:
981 984 cut = 1
982 985 else:
983 986 cut = None
984 987
985 988 while True:
986 989 for parent in c.parents()[:cut]:
987 990 visit[(parent.linkrev(), parent.filenode())] = parent
988 991 if not visit:
989 992 break
990 993 c = visit.pop(max(visit))
991 994 yield c
992 995
993 996 class filectx(basefilectx):
994 997 """A filecontext object makes access to data related to a particular
995 998 filerevision convenient."""
996 999 def __init__(self, repo, path, changeid=None, fileid=None,
997 1000 filelog=None, changectx=None):
998 1001 """changeid can be a changeset revision, node, or tag.
999 1002 fileid can be a file revision or node."""
1000 1003 self._repo = repo
1001 1004 self._path = path
1002 1005
1003 1006 assert (changeid is not None
1004 1007 or fileid is not None
1005 1008 or changectx is not None), \
1006 1009 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1007 1010 % (changeid, fileid, changectx))
1008 1011
1009 1012 if filelog is not None:
1010 1013 self._filelog = filelog
1011 1014
1012 1015 if changeid is not None:
1013 1016 self._changeid = changeid
1014 1017 if changectx is not None:
1015 1018 self._changectx = changectx
1016 1019 if fileid is not None:
1017 1020 self._fileid = fileid
1018 1021
1019 1022 @propertycache
1020 1023 def _changectx(self):
1021 1024 try:
1022 1025 return changectx(self._repo, self._changeid)
1023 1026 except error.FilteredRepoLookupError:
1024 1027 # Linkrev may point to any revision in the repository. When the
1025 1028 # repository is filtered this may lead to `filectx` trying to build
1026 1029             # `changectx` for a filtered revision. In such a case we fall back
1027 1030             # to creating `changectx` on the unfiltered version of the repository.
1028 1031 # This fallback should not be an issue because `changectx` from
1029 1032 # `filectx` are not used in complex operations that care about
1030 1033 # filtering.
1031 1034 #
1032 1035             # This fallback is a cheap and dirty fix that prevents several
1033 1036             # crashes. It does not ensure the behavior is correct. However the
1034 1037             # behavior was not correct before filtering either and "incorrect
1035 1038             # behavior" is seen as better than "crash".
1036 1039             #
1037 1040             # Linkrevs have several serious troubles with filtering that are
1038 1041             # complicated to solve. Proper handling of the issue here should be
1039 1042             # considered when solving the linkrev issues is on the table.
1040 1043 return changectx(self._repo.unfiltered(), self._changeid)
1041 1044
1042 1045 def filectx(self, fileid, changeid=None):
1043 1046 '''opens an arbitrary revision of the file without
1044 1047 opening a new filelog'''
1045 1048 return filectx(self._repo, self._path, fileid=fileid,
1046 1049 filelog=self._filelog, changeid=changeid)
1047 1050
1048 1051 def data(self):
1049 1052 try:
1050 1053 return self._filelog.read(self._filenode)
1051 1054 except error.CensoredNodeError:
1052 1055 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1053 1056 return ""
1054 1057 raise util.Abort(_("censored node: %s") % short(self._filenode),
1055 1058 hint=_("set censor.policy to ignore errors"))
1056 1059
1057 1060 def size(self):
1058 1061 return self._filelog.size(self._filerev)
1059 1062
1060 1063 def renamed(self):
1061 1064 """check if file was actually renamed in this changeset revision
1062 1065
1063 1066         If a rename is logged in the file revision, we report the copy for the
1064 1067         changeset only if the file revision's linkrev points back to the changeset
1065 1068         in question or both changeset parents contain different file revisions.
1066 1069 """
1067 1070
1068 1071 renamed = self._filelog.renamed(self._filenode)
1069 1072 if not renamed:
1070 1073 return renamed
1071 1074
1072 1075 if self.rev() == self.linkrev():
1073 1076 return renamed
1074 1077
1075 1078 name = self.path()
1076 1079 fnode = self._filenode
1077 1080 for p in self._changectx.parents():
1078 1081 try:
1079 1082 if fnode == p.filenode(name):
1080 1083 return None
1081 1084 except error.LookupError:
1082 1085 pass
1083 1086 return renamed
1084 1087
1085 1088 def children(self):
1086 1089 # hard for renames
1087 1090 c = self._filelog.children(self._filenode)
1088 1091 return [filectx(self._repo, self._path, fileid=x,
1089 1092 filelog=self._filelog) for x in c]
1090 1093
1091 1094 class committablectx(basectx):
1092 1095 """A committablectx object provides common functionality for a context that
1093 1096 wants the ability to commit, e.g. workingctx or memctx."""
1094 1097 def __init__(self, repo, text="", user=None, date=None, extra=None,
1095 1098 changes=None):
1096 1099 self._repo = repo
1097 1100 self._rev = None
1098 1101 self._node = None
1099 1102 self._text = text
1100 1103 if date:
1101 1104 self._date = util.parsedate(date)
1102 1105 if user:
1103 1106 self._user = user
1104 1107 if changes:
1105 1108 self._status = changes
1106 1109
1107 1110 self._extra = {}
1108 1111 if extra:
1109 1112 self._extra = extra.copy()
1110 1113 if 'branch' not in self._extra:
1111 1114 try:
1112 1115 branch = encoding.fromlocal(self._repo.dirstate.branch())
1113 1116 except UnicodeDecodeError:
1114 1117 raise util.Abort(_('branch name not in UTF-8!'))
1115 1118 self._extra['branch'] = branch
1116 1119 if self._extra['branch'] == '':
1117 1120 self._extra['branch'] = 'default'
1118 1121
1119 1122 def __str__(self):
1120 1123 return str(self._parents[0]) + "+"
1121 1124
1122 1125 def __nonzero__(self):
1123 1126 return True
1124 1127
1125 1128 def _buildflagfunc(self):
1126 1129 # Create a fallback function for getting file flags when the
1127 1130 # filesystem doesn't support them
1128 1131
1129 1132 copiesget = self._repo.dirstate.copies().get
1130 1133
1131 1134 if len(self._parents) < 2:
1132 1135 # when we have one parent, it's easy: copy from parent
1133 1136 man = self._parents[0].manifest()
1134 1137 def func(f):
1135 1138 f = copiesget(f, f)
1136 1139 return man.flags(f)
1137 1140 else:
1138 1141 # merges are tricky: we try to reconstruct the unstored
1139 1142 # result from the merge (issue1802)
1140 1143 p1, p2 = self._parents
1141 1144 pa = p1.ancestor(p2)
1142 1145 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1143 1146
1144 1147 def func(f):
1145 1148 f = copiesget(f, f) # may be wrong for merges with copies
1146 1149 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1147 1150 if fl1 == fl2:
1148 1151 return fl1
1149 1152 if fl1 == fla:
1150 1153 return fl2
1151 1154 if fl2 == fla:
1152 1155 return fl1
1153 1156 return '' # punt for conflicts
1154 1157
1155 1158 return func
1156 1159
1157 1160 @propertycache
1158 1161 def _flagfunc(self):
1159 1162 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1160 1163
1161 1164 @propertycache
1162 1165 def _manifest(self):
1163 1166 """generate a manifest corresponding to the values in self._status
1164 1167
1165 1168         This reuses the file nodeid from the parent, but we append an extra letter
1166 1169         when modified. Modified files get an extra 'm' while added files get
1167 1170         an extra 'a'. This is used by manifest merge to see that files
1168 1171         are different and by update logic to avoid deleting newly added files.
1169 1172 """
1170 1173
1171 1174 man1 = self._parents[0].manifest()
1172 1175 man = man1.copy()
1173 1176 if len(self._parents) > 1:
1174 1177 man2 = self.p2().manifest()
1175 1178 def getman(f):
1176 1179 if f in man1:
1177 1180 return man1
1178 1181 return man2
1179 1182 else:
1180 1183 getman = lambda f: man1
1181 1184
1182 1185 copied = self._repo.dirstate.copies()
1183 1186 ff = self._flagfunc
1184 1187 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1185 1188 for f in l:
1186 1189 orig = copied.get(f, f)
1187 1190 man[f] = getman(orig).get(orig, nullid) + i
1188 1191 try:
1189 1192 man.setflag(f, ff(f))
1190 1193 except OSError:
1191 1194 pass
1192 1195
1193 1196 for f in self._status.deleted + self._status.removed:
1194 1197 if f in man:
1195 1198 del man[f]
1196 1199
1197 1200 return man
1198 1201
1199 1202 @propertycache
1200 1203 def _status(self):
1201 1204 return self._repo.status()
1202 1205
1203 1206 @propertycache
1204 1207 def _user(self):
1205 1208 return self._repo.ui.username()
1206 1209
1207 1210 @propertycache
1208 1211 def _date(self):
1209 1212 return util.makedate()
1210 1213
1211 1214 def subrev(self, subpath):
1212 1215 return None
1213 1216
1214 1217 def manifestnode(self):
1215 1218 return None
1216 1219 def user(self):
1217 1220 return self._user or self._repo.ui.username()
1218 1221 def date(self):
1219 1222 return self._date
1220 1223 def description(self):
1221 1224 return self._text
1222 1225 def files(self):
1223 1226 return sorted(self._status.modified + self._status.added +
1224 1227 self._status.removed)
1225 1228
1226 1229 def modified(self):
1227 1230 return self._status.modified
1228 1231 def added(self):
1229 1232 return self._status.added
1230 1233 def removed(self):
1231 1234 return self._status.removed
1232 1235 def deleted(self):
1233 1236 return self._status.deleted
1234 1237 def branch(self):
1235 1238 return encoding.tolocal(self._extra['branch'])
1236 1239 def closesbranch(self):
1237 1240 return 'close' in self._extra
1238 1241 def extra(self):
1239 1242 return self._extra
1240 1243
1241 1244 def tags(self):
1242 1245 t = []
1243 1246 for p in self.parents():
1244 1247 t.extend(p.tags())
1245 1248 return t
1246 1249
1247 1250 def bookmarks(self):
1248 1251 b = []
1249 1252 for p in self.parents():
1250 1253 b.extend(p.bookmarks())
1251 1254 return b
1252 1255
1253 1256 def phase(self):
1254 1257 phase = phases.draft # default phase to draft
1255 1258 for p in self.parents():
1256 1259 phase = max(phase, p.phase())
1257 1260 return phase
1258 1261
1259 1262 def hidden(self):
1260 1263 return False
1261 1264
1262 1265 def children(self):
1263 1266 return []
1264 1267
1265 1268 def flags(self, path):
1266 1269 if '_manifest' in self.__dict__:
1267 1270 try:
1268 1271 return self._manifest.flags(path)
1269 1272 except KeyError:
1270 1273 return ''
1271 1274
1272 1275 try:
1273 1276 return self._flagfunc(path)
1274 1277 except OSError:
1275 1278 return ''
1276 1279
1277 1280 def ancestor(self, c2):
1278 1281 """return the "best" ancestor context of self and c2"""
1279 1282 return self._parents[0].ancestor(c2) # punt on two parents for now
1280 1283
1281 1284 def walk(self, match):
1282 1285 '''Generates matching file names.'''
1283 1286 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1284 1287 True, False))
1285 1288
1286 1289 def matches(self, match):
1287 1290 return sorted(self._repo.dirstate.matches(match))
1288 1291
1289 1292 def ancestors(self):
1290 1293 for p in self._parents:
1291 1294 yield p
1292 1295 for a in self._repo.changelog.ancestors(
1293 1296 [p.rev() for p in self._parents]):
1294 1297 yield changectx(self._repo, a)
1295 1298
1296 1299 def markcommitted(self, node):
1297 1300 """Perform post-commit cleanup necessary after committing this ctx
1298 1301
1299 1302 Specifically, this updates backing stores this working context
1300 1303 wraps to reflect the fact that the changes reflected by this
1301 1304 workingctx have been committed. For example, it marks
1302 1305 modified and added files as normal in the dirstate.
1303 1306
1304 1307 """
1305 1308
1306 1309 self._repo.dirstate.beginparentchange()
1307 1310 for f in self.modified() + self.added():
1308 1311 self._repo.dirstate.normal(f)
1309 1312 for f in self.removed():
1310 1313 self._repo.dirstate.drop(f)
1311 1314 self._repo.dirstate.setparents(node)
1312 1315 self._repo.dirstate.endparentchange()
1313 1316
1314 1317 class workingctx(committablectx):
1315 1318 """A workingctx object makes access to data related to
1316 1319 the current working directory convenient.
1317 1320 date - any valid date string or (unixtime, offset), or None.
1318 1321 user - username string, or None.
1319 1322 extra - a dictionary of extra values, or None.
1320 1323 changes - a list of file lists as returned by localrepo.status()
1321 1324 or None to use the repository status.
1322 1325 """
1323 1326 def __init__(self, repo, text="", user=None, date=None, extra=None,
1324 1327 changes=None):
1325 1328 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1326 1329
1327 1330 def __iter__(self):
1328 1331 d = self._repo.dirstate
1329 1332 for f in d:
1330 1333 if d[f] != 'r':
1331 1334 yield f
1332 1335
1333 1336 def __contains__(self, key):
1334 1337 return self._repo.dirstate[key] not in "?r"
1335 1338
1336 1339 @propertycache
1337 1340 def _parents(self):
1338 1341 p = self._repo.dirstate.parents()
1339 1342 if p[1] == nullid:
1340 1343 p = p[:-1]
1341 1344 return [changectx(self._repo, x) for x in p]
1342 1345
1343 1346 def filectx(self, path, filelog=None):
1344 1347 """get a file context from the working directory"""
1345 1348 return workingfilectx(self._repo, path, workingctx=self,
1346 1349 filelog=filelog)
1347 1350
1348 1351 def dirty(self, missing=False, merge=True, branch=True):
1349 1352 "check whether a working directory is modified"
1350 1353 # check subrepos first
1351 1354 for s in sorted(self.substate):
1352 1355 if self.sub(s).dirty():
1353 1356 return True
1354 1357 # check current working dir
1355 1358 return ((merge and self.p2()) or
1356 1359 (branch and self.branch() != self.p1().branch()) or
1357 1360 self.modified() or self.added() or self.removed() or
1358 1361 (missing and self.deleted()))
1359 1362
1360 1363 def add(self, list, prefix=""):
1361 1364 join = lambda f: os.path.join(prefix, f)
1362 1365 wlock = self._repo.wlock()
1363 1366 ui, ds = self._repo.ui, self._repo.dirstate
1364 1367 try:
1365 1368 rejected = []
1366 1369 lstat = self._repo.wvfs.lstat
1367 1370 for f in list:
1368 1371 scmutil.checkportable(ui, join(f))
1369 1372 try:
1370 1373 st = lstat(f)
1371 1374 except OSError:
1372 1375 ui.warn(_("%s does not exist!\n") % join(f))
1373 1376 rejected.append(f)
1374 1377 continue
1375 1378 if st.st_size > 10000000:
1376 1379 ui.warn(_("%s: up to %d MB of RAM may be required "
1377 1380 "to manage this file\n"
1378 1381 "(use 'hg revert %s' to cancel the "
1379 1382 "pending addition)\n")
1380 1383 % (f, 3 * st.st_size // 1000000, join(f)))
1381 1384 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1382 1385 ui.warn(_("%s not added: only files and symlinks "
1383 1386 "supported currently\n") % join(f))
1384 1387 rejected.append(f)
1385 1388 elif ds[f] in 'amn':
1386 1389 ui.warn(_("%s already tracked!\n") % join(f))
1387 1390 elif ds[f] == 'r':
1388 1391 ds.normallookup(f)
1389 1392 else:
1390 1393 ds.add(f)
1391 1394 return rejected
1392 1395 finally:
1393 1396 wlock.release()
1394 1397
1395 1398 def forget(self, files, prefix=""):
1396 1399 join = lambda f: os.path.join(prefix, f)
1397 1400 wlock = self._repo.wlock()
1398 1401 try:
1399 1402 rejected = []
1400 1403 for f in files:
1401 1404 if f not in self._repo.dirstate:
1402 1405 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1403 1406 rejected.append(f)
1404 1407 elif self._repo.dirstate[f] != 'a':
1405 1408 self._repo.dirstate.remove(f)
1406 1409 else:
1407 1410 self._repo.dirstate.drop(f)
1408 1411 return rejected
1409 1412 finally:
1410 1413 wlock.release()
1411 1414
1412 1415 def undelete(self, list):
1413 1416 pctxs = self.parents()
1414 1417 wlock = self._repo.wlock()
1415 1418 try:
1416 1419 for f in list:
1417 1420 if self._repo.dirstate[f] != 'r':
1418 1421 self._repo.ui.warn(_("%s not removed!\n") % f)
1419 1422 else:
1420 1423 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1421 1424 t = fctx.data()
1422 1425 self._repo.wwrite(f, t, fctx.flags())
1423 1426 self._repo.dirstate.normal(f)
1424 1427 finally:
1425 1428 wlock.release()
1426 1429
1427 1430 def copy(self, source, dest):
1428 1431 try:
1429 1432 st = self._repo.wvfs.lstat(dest)
1430 1433 except OSError, err:
1431 1434 if err.errno != errno.ENOENT:
1432 1435 raise
1433 1436 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1434 1437 return
1435 1438 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1436 1439 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1437 1440 "symbolic link\n") % dest)
1438 1441 else:
1439 1442 wlock = self._repo.wlock()
1440 1443 try:
1441 1444 if self._repo.dirstate[dest] in '?':
1442 1445 self._repo.dirstate.add(dest)
1443 1446 elif self._repo.dirstate[dest] in 'r':
1444 1447 self._repo.dirstate.normallookup(dest)
1445 1448 self._repo.dirstate.copy(source, dest)
1446 1449 finally:
1447 1450 wlock.release()
1448 1451
1449 1452 def match(self, pats=[], include=None, exclude=None, default='glob',
1450 1453 listsubrepos=False):
1451 1454 r = self._repo
1452 1455
1453 1456 # Only a case insensitive filesystem needs magic to translate user input
1454 1457 # to actual case in the filesystem.
1455 1458 if not util.checkcase(r.root):
1456 1459 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1457 1460 exclude, default, r.auditor, self,
1458 1461 listsubrepos=listsubrepos)
1459 1462 return matchmod.match(r.root, r.getcwd(), pats,
1460 1463 include, exclude, default,
1461 1464 auditor=r.auditor, ctx=self,
1462 1465 listsubrepos=listsubrepos)
1463 1466
1464 1467 def _filtersuspectsymlink(self, files):
1465 1468 if not files or self._repo.dirstate._checklink:
1466 1469 return files
1467 1470
1468 1471 # Symlink placeholders may get non-symlink-like contents
1469 1472 # via user error or dereferencing by NFS or Samba servers,
1470 1473 # so we filter out any placeholders that don't look like a
1471 1474 # symlink
1472 1475 sane = []
1473 1476 for f in files:
1474 1477 if self.flags(f) == 'l':
1475 1478 d = self[f].data()
1476 1479 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1477 1480 self._repo.ui.debug('ignoring suspect symlink placeholder'
1478 1481 ' "%s"\n' % f)
1479 1482 continue
1480 1483 sane.append(f)
1481 1484 return sane
1482 1485
1483 1486 def _checklookup(self, files):
1484 1487 # check for any possibly clean files
1485 1488 if not files:
1486 1489 return [], []
1487 1490
1488 1491 modified = []
1489 1492 fixup = []
1490 1493 pctx = self._parents[0]
1491 1494 # do a full compare of any files that might have changed
1492 1495 for f in sorted(files):
1493 1496 if (f not in pctx or self.flags(f) != pctx.flags(f)
1494 1497 or pctx[f].cmp(self[f])):
1495 1498 modified.append(f)
1496 1499 else:
1497 1500 fixup.append(f)
1498 1501
1499 1502 # update dirstate for files that are actually clean
1500 1503 if fixup:
1501 1504 try:
1502 1505 # updating the dirstate is optional
1503 1506 # so we don't wait on the lock
1504 1507 # wlock can invalidate the dirstate, so cache normal _after_
1505 1508 # taking the lock
1506 1509 wlock = self._repo.wlock(False)
1507 1510 normal = self._repo.dirstate.normal
1508 1511 try:
1509 1512 for f in fixup:
1510 1513 normal(f)
1511 1514 finally:
1512 1515 wlock.release()
1513 1516 except error.LockError:
1514 1517 pass
1515 1518 return modified, fixup
1516 1519
1517 1520 def _manifestmatches(self, match, s):
1518 1521 """Slow path for workingctx
1519 1522
1520 1523 The fast path is when we compare the working directory to its parent
1521 1524 which means this function is comparing with a non-parent; therefore we
1522 1525 need to build a manifest and return what matches.
1523 1526 """
1524 1527 mf = self._repo['.']._manifestmatches(match, s)
1525 1528 for f in s.modified + s.added:
1526 1529 mf[f] = _newnode
1527 1530 mf.setflag(f, self.flags(f))
1528 1531 for f in s.removed:
1529 1532 if f in mf:
1530 1533 del mf[f]
1531 1534 return mf
1532 1535
1533 1536 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1534 1537 unknown=False):
1535 1538 '''Gets the status from the dirstate -- internal use only.'''
1536 1539 listignored, listclean, listunknown = ignored, clean, unknown
1537 1540 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1538 1541 subrepos = []
1539 1542 if '.hgsub' in self:
1540 1543 subrepos = sorted(self.substate)
1541 1544 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1542 1545 listclean, listunknown)
1543 1546
1544 1547 # check for any possibly clean files
1545 1548 if cmp:
1546 1549 modified2, fixup = self._checklookup(cmp)
1547 1550 s.modified.extend(modified2)
1548 1551
1549 1552 # update dirstate for files that are actually clean
1550 1553 if fixup and listclean:
1551 1554 s.clean.extend(fixup)
1552 1555
1553 1556 if match.always():
1554 1557 # cache for performance
1555 1558 if s.unknown or s.ignored or s.clean:
1556 1559 # "_status" is cached with list*=False in the normal route
1557 1560 self._status = scmutil.status(s.modified, s.added, s.removed,
1558 1561 s.deleted, [], [], [])
1559 1562 else:
1560 1563 self._status = s
1561 1564
1562 1565 return s
1563 1566
1564 1567 def _buildstatus(self, other, s, match, listignored, listclean,
1565 1568 listunknown):
1566 1569 """build a status with respect to another context
1567 1570
1568 1571 This includes logic for maintaining the fast path of status when
1569 1572 comparing the working directory against its parent, which is to skip
1570 1573 building a new manifest if self (working directory) is not comparing
1571 1574 against its parent (repo['.']).
1572 1575 """
1573 1576 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1574 1577 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1575 1578 # might have accidentally ended up with the entire contents of the file
1576 1579 # they are supposed to be linking to.
1577 1580 s.modified[:] = self._filtersuspectsymlink(s.modified)
1578 1581 if other != self._repo['.']:
1579 1582 s = super(workingctx, self)._buildstatus(other, s, match,
1580 1583 listignored, listclean,
1581 1584 listunknown)
1582 1585 return s
1583 1586
1584 1587 def _matchstatus(self, other, match):
1585 1588 """override the match method with a filter for directory patterns
1586 1589
1587 1590 We use inheritance to customize the match.bad method only in cases of
1588 1591 workingctx since it belongs only to the working directory when
1589 1592 comparing against the parent changeset.
1590 1593
1591 1594 If we aren't comparing against the working directory's parent, then we
1592 1595 just use the default match object sent to us.
1593 1596 """
1594 1597 superself = super(workingctx, self)
1595 1598 match = superself._matchstatus(other, match)
1596 1599 if other != self._repo['.']:
1597 1600 def bad(f, msg):
1598 1601 # 'f' may be a directory pattern from 'match.files()',
1599 1602 # so 'f not in ctx1' is not enough
1600 1603 if f not in other and not other.hasdir(f):
1601 1604 self._repo.ui.warn('%s: %s\n' %
1602 1605 (self._repo.dirstate.pathto(f), msg))
1603 1606 match.bad = bad
1604 1607 return match
1605 1608
1606 1609 class committablefilectx(basefilectx):
1607 1610 """A committablefilectx provides common functionality for a file context
1608 1611 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1609 1612 def __init__(self, repo, path, filelog=None, ctx=None):
1610 1613 self._repo = repo
1611 1614 self._path = path
1612 1615 self._changeid = None
1613 1616 self._filerev = self._filenode = None
1614 1617
1615 1618 if filelog is not None:
1616 1619 self._filelog = filelog
1617 1620 if ctx:
1618 1621 self._changectx = ctx
1619 1622
1620 1623 def __nonzero__(self):
1621 1624 return True
1622 1625
1623 1626 def linkrev(self):
1624 1627 # linked to self._changectx no matter if file is modified or not
1625 1628 return self.rev()
1626 1629
1627 1630 def parents(self):
1628 1631 '''return parent filectxs, following copies if necessary'''
1629 1632 def filenode(ctx, path):
1630 1633 return ctx._manifest.get(path, nullid)
1631 1634
1632 1635 path = self._path
1633 1636 fl = self._filelog
1634 1637 pcl = self._changectx._parents
1635 1638 renamed = self.renamed()
1636 1639
1637 1640 if renamed:
1638 1641 pl = [renamed + (None,)]
1639 1642 else:
1640 1643 pl = [(path, filenode(pcl[0], path), fl)]
1641 1644
1642 1645 for pc in pcl[1:]:
1643 1646 pl.append((path, filenode(pc, path), fl))
1644 1647
1645 1648 return [self._parentfilectx(p, fileid=n, filelog=l)
1646 1649 for p, n, l in pl if n != nullid]
1647 1650
1648 1651 def children(self):
1649 1652 return []
1650 1653
1651 1654 class workingfilectx(committablefilectx):
1652 1655 """A workingfilectx object makes access to data related to a particular
1653 1656 file in the working directory convenient."""
1654 1657 def __init__(self, repo, path, filelog=None, workingctx=None):
1655 1658 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1656 1659
1657 1660 @propertycache
1658 1661 def _changectx(self):
1659 1662 return workingctx(self._repo)
1660 1663
1661 1664 def data(self):
1662 1665 return self._repo.wread(self._path)
1663 1666 def renamed(self):
1664 1667 rp = self._repo.dirstate.copied(self._path)
1665 1668 if not rp:
1666 1669 return None
1667 1670 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1668 1671
1669 1672 def size(self):
1670 1673 return self._repo.wvfs.lstat(self._path).st_size
1671 1674 def date(self):
1672 1675 t, tz = self._changectx.date()
1673 1676 try:
1674 1677 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1675 1678 except OSError, err:
1676 1679 if err.errno != errno.ENOENT:
1677 1680 raise
1678 1681 return (t, tz)
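# Editor's sketch (not part of context.py): date() above falls back to the
# changeset date when the file no longer exists on disk.  The same
# stat-with-ENOENT-fallback idiom in isolation ('path' and 'default' are
# illustrative names):
def _example_mtime_or_default(path, default):
    import errno, os
    try:
        return int(os.lstat(path).st_mtime)
    except OSError, err:
        if err.errno != errno.ENOENT:
            raise                          # unexpected failure: propagate it
        return default                     # file is gone: use the fallback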
1679 1682
1680 1683 def cmp(self, fctx):
1681 1684 """compare with other file context
1682 1685
1683 1686 returns True if different than fctx.
1684 1687 """
1685 1688 # fctx should be a filectx (not a workingfilectx)
1686 1689 # invert comparison to reuse the same code path
1687 1690 return fctx.cmp(self)
1688 1691
1689 1692 def remove(self, ignoremissing=False):
1690 1693 """wraps unlink for a repo's working directory"""
1691 1694 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1692 1695
1693 1696 def write(self, data, flags):
1694 1697 """wraps repo.wwrite"""
1695 1698 self._repo.wwrite(self._path, data, flags)
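# Editor's sketch (not part of context.py): workingfilectx objects are
# normally reached through the working directory context, repo[None];
# 'repo' is assumed to be an existing localrepository.
def _example_read_working_file(repo, path):
    wctx = repo[None]                      # workingctx of the working directory
    wfctx = wctx[path]                     # __getitem__ -> workingfilectx
    return wfctx.data(), wfctx.size()      # on-disk content and size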
1696 1699
1697 1700 class workingcommitctx(workingctx):
1698 1701 """A workingcommitctx object makes access to data related to
1699 1702 the revision being committed convenient.
1700 1703
1701 1704 This hides changes in the working directory, if they aren't
1702 1705 committed in this context.
1703 1706 """
1704 1707 def __init__(self, repo, changes,
1705 1708 text="", user=None, date=None, extra=None):
1706 1709 super(workingctx, self).__init__(repo, text, user, date, extra,
1707 1710 changes)
1708 1711
1709 1712 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1710 1713 unknown=False):
1711 1714 """Return matched files only in ``self._status``
1712 1715
1713 1716 Uncommitted files appear "clean" via this context, even if
1714 1717 they aren't actually so in the working directory.
1715 1718 """
1716 1719 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1717 1720 if clean:
1718 1721 clean = [f for f in self._manifest if f not in self._changedset]
1719 1722 else:
1720 1723 clean = []
1721 1724 return scmutil.status([f for f in self._status.modified if match(f)],
1722 1725 [f for f in self._status.added if match(f)],
1723 1726 [f for f in self._status.removed if match(f)],
1724 1727 [], [], [], clean)
1725 1728
1726 1729 @propertycache
1727 1730 def _changedset(self):
1728 1731 """Return the set of files changed in this context
1729 1732 """
1730 1733 changed = set(self._status.modified)
1731 1734 changed.update(self._status.added)
1732 1735 changed.update(self._status.removed)
1733 1736 return changed
1734 1737
1735 1738 class memctx(committablectx):
1736 1739 """Use memctx to perform in-memory commits via localrepo.commitctx().
1737 1740
1738 1741     Revision information is supplied at initialization time, while
1739 1742     related file data is made available through a callback
1740 1743 mechanism. 'repo' is the current localrepo, 'parents' is a
1741 1744 sequence of two parent revisions identifiers (pass None for every
1742 1745 missing parent), 'text' is the commit message and 'files' lists
1743 1746 names of files touched by the revision (normalized and relative to
1744 1747 repository root).
1745 1748
1746 1749 filectxfn(repo, memctx, path) is a callable receiving the
1747 1750     repository, the current memctx object and the normalized path of the
1748 1751     requested file, relative to the repository root. It is fired by the
1749 1752     commit function for every file in 'files', but the call order is
1750 1753     undefined. If the file is available in the revision being
1751 1754     committed (updated or added), filectxfn returns a memfilectx
1752 1755     object. If the file was removed, filectxfn returns None (Mercurial
1753 1756     <= 3.1 expected an IOError to be raised instead). Moved files are
1754 1757     represented by marking the source file removed and the new file
1755 1758     added with copy information (see memfilectx).
1756 1759
1757 1760 user receives the committer name and defaults to current
1758 1761 repository username, date is the commit date in any format
1759 1762 supported by util.parsedate() and defaults to current date, extra
1760 1763 is a dictionary of metadata or is left empty.
1761 1764 """
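# Editor's sketch (not part of context.py): one way an extension might build a
# commit in memory with memctx.  'repo' is assumed to be an existing
# localrepository; the file name and content are made up for illustration.
def _example_inmemory_commit(repo):
    from mercurial import context

    def filectxfn(repo, mctx, path):
        # return content for each entry of 'files'; returning None would
        # mark the file as removed instead
        return context.memfilectx(repo, path, 'hello\n', memctx=mctx)

    mctx = context.memctx(repo,
                          parents=(repo['.'].node(), None),
                          text='example in-memory commit',
                          files=['hello.txt'],
                          filectxfn=filectxfn,
                          user='Example User <user@example.org>')
    return repo.commitctx(mctx)            # node of the new changeset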
1762 1765
1763 1766 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1764 1767 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1765 1768 # this field to determine what to do in filectxfn.
1766 1769 _returnnoneformissingfiles = True
1767 1770
1768 1771 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1769 1772 date=None, extra=None, editor=False):
1770 1773 super(memctx, self).__init__(repo, text, user, date, extra)
1771 1774 self._rev = None
1772 1775 self._node = None
1773 1776 parents = [(p or nullid) for p in parents]
1774 1777 p1, p2 = parents
1775 1778 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1776 1779 files = sorted(set(files))
1777 1780 self._files = files
1778 1781 self.substate = {}
1779 1782
1780 1783 # if store is not callable, wrap it in a function
1781 1784 if not callable(filectxfn):
1782 1785 def getfilectx(repo, memctx, path):
1783 1786 fctx = filectxfn[path]
1784 1787 # this is weird but apparently we only keep track of one parent
1785 1788 # (why not only store that instead of a tuple?)
1786 1789 copied = fctx.renamed()
1787 1790 if copied:
1788 1791 copied = copied[0]
1789 1792 return memfilectx(repo, path, fctx.data(),
1790 1793 islink=fctx.islink(), isexec=fctx.isexec(),
1791 1794 copied=copied, memctx=memctx)
1792 1795 self._filectxfn = getfilectx
1793 1796 else:
1794 1797 # "util.cachefunc" reduces invocation of possibly expensive
1795 1798 # "filectxfn" for performance (e.g. converting from another VCS)
1796 1799 self._filectxfn = util.cachefunc(filectxfn)
1797 1800
1798 1801 if extra:
1799 1802 self._extra = extra.copy()
1800 1803 else:
1801 1804 self._extra = {}
1802 1805
1803 1806 if self._extra.get('branch', '') == '':
1804 1807 self._extra['branch'] = 'default'
1805 1808
1806 1809 if editor:
1807 1810 self._text = editor(self._repo, self, [])
1808 1811 self._repo.savecommitmessage(self._text)
1809 1812
1810 1813 def filectx(self, path, filelog=None):
1811 1814         """get a file context for this in-memory commit via filectxfn
1812 1815 
1813 1816         Returns None if the file doesn't exist and should be removed."""
1814 1817 return self._filectxfn(self._repo, self, path)
1815 1818
1816 1819 def commit(self):
1817 1820 """commit context to the repo"""
1818 1821 return self._repo.commitctx(self)
1819 1822
1820 1823 @propertycache
1821 1824 def _manifest(self):
1822 1825 """generate a manifest based on the return values of filectxfn"""
1823 1826
1824 1827 # keep this simple for now; just worry about p1
1825 1828 pctx = self._parents[0]
1826 1829 man = pctx.manifest().copy()
1827 1830
1828 1831 for f in self._status.modified:
1829 1832 p1node = nullid
1830 1833 p2node = nullid
1831 1834 p = pctx[f].parents() # if file isn't in pctx, check p2?
1832 1835 if len(p) > 0:
1833 1836 p1node = p[0].node()
1834 1837 if len(p) > 1:
1835 1838 p2node = p[1].node()
1836 1839 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1837 1840
1838 1841 for f in self._status.added:
1839 1842 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1840 1843
1841 1844 for f in self._status.removed:
1842 1845 if f in man:
1843 1846 del man[f]
1844 1847
1845 1848 return man
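# Editor's sketch (not part of context.py): revlog.hash(text, p1, p2), used
# above, computes the standard Mercurial nodeid -- the sha1 of the two parent
# nodes (smaller one first) followed by the file text.
def _example_nodeid(text, p1, p2):
    import hashlib
    a, b = sorted([p1, p2])
    s = hashlib.sha1(a)
    s.update(b)
    s.update(text)
    return s.digest()                      # 20-byte binary nodeid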
1846 1849
1847 1850 @propertycache
1848 1851 def _status(self):
1849 1852 """Calculate exact status from ``files`` specified at construction
1850 1853 """
1851 1854 man1 = self.p1().manifest()
1852 1855 p2 = self._parents[1]
1853 1856 # "1 < len(self._parents)" can't be used for checking
1854 1857 # existence of the 2nd parent, because "memctx._parents" is
1855 1858         # explicitly initialized as a list whose length is always 2.
1856 1859 if p2.node() != nullid:
1857 1860 man2 = p2.manifest()
1858 1861 managing = lambda f: f in man1 or f in man2
1859 1862 else:
1860 1863 managing = lambda f: f in man1
1861 1864
1862 1865 modified, added, removed = [], [], []
1863 1866 for f in self._files:
1864 1867 if not managing(f):
1865 1868 added.append(f)
1866 1869 elif self[f]:
1867 1870 modified.append(f)
1868 1871 else:
1869 1872 removed.append(f)
1870 1873
1871 1874 return scmutil.status(modified, added, removed, [], [], [], [])
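# Editor's sketch (not part of context.py): the classification above restated
# with plain callables.  'in_parent(f)' says whether either parent manifest
# tracks f; 'has_data(f)' says whether filectxfn produced content for f.
def _example_classify(files, in_parent, has_data):
    modified, added, removed = [], [], []
    for f in files:
        if not in_parent(f):
            added.append(f)                # unknown to both parents: new file
        elif has_data(f):
            modified.append(f)             # tracked and has content: modified
        else:
            removed.append(f)              # tracked but no content: removed
    return modified, added, removed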
1872 1875
1873 1876 class memfilectx(committablefilectx):
1874 1877 """memfilectx represents an in-memory file to commit.
1875 1878
1876 1879 See memctx and committablefilectx for more details.
1877 1880 """
1878 1881 def __init__(self, repo, path, data, islink=False,
1879 1882 isexec=False, copied=None, memctx=None):
1880 1883 """
1881 1884 path is the normalized file path relative to repository root.
1882 1885 data is the file content as a string.
1883 1886 islink is True if the file is a symbolic link.
1884 1887 isexec is True if the file is executable.
1885 1888 copied is the source file path if current file was copied in the
1886 1889 revision being committed, or None."""
1887 1890 super(memfilectx, self).__init__(repo, path, None, memctx)
1888 1891 self._data = data
1889 1892 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1890 1893 self._copied = None
1891 1894 if copied:
1892 1895 self._copied = (copied, nullid)
1893 1896
1894 1897 def data(self):
1895 1898 return self._data
1896 1899 def size(self):
1897 1900 return len(self.data())
1898 1901 def flags(self):
1899 1902 return self._flags
1900 1903 def renamed(self):
1901 1904 return self._copied
1902 1905
1903 1906 def remove(self, ignoremissing=False):
1904 1907 """wraps unlink for a repo's working directory"""
1905 1908 # need to figure out what to do here
1906 1909 del self._changectx[self._path]
1907 1910
1908 1911 def write(self, data, flags):
1909 1912 """wraps repo.wwrite"""
1910 1913 self._data = data