memctx: simplify _manifest with new revlog nodeids...
Sean Farley
r39749:a5dafefc default
@@ -1,2507 +1,2499 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import filecmp
12 12 import os
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirfilenodeids,
25 25 wdirid,
26 26 )
27 27 from . import (
28 28 dagop,
29 29 encoding,
30 30 error,
31 31 fileset,
32 32 match as matchmod,
33 33 obsolete as obsmod,
34 34 patch,
35 35 pathutil,
36 36 phases,
37 37 pycompat,
38 38 repoview,
39 revlog,
40 39 scmutil,
41 40 sparse,
42 41 subrepo,
43 42 subrepoutil,
44 43 util,
45 44 )
46 45 from .utils import (
47 46 dateutil,
48 47 stringutil,
49 48 )
50 49
51 50 propertycache = util.propertycache
52 51
53 52 class basectx(object):
54 53 """A basectx object represents the common logic for its children:
55 54 changectx: read-only context that is already present in the repo,
56 55 workingctx: a context that represents the working directory and can
57 56 be committed,
58 57 memctx: a context that represents changes in-memory and can also
59 58 be committed."""
60 59
61 60 def __init__(self, repo):
62 61 self._repo = repo
63 62
64 63 def __bytes__(self):
65 64 return short(self.node())
66 65
67 66 __str__ = encoding.strmethod(__bytes__)
68 67
69 68 def __repr__(self):
70 69 return r"<%s %s>" % (type(self).__name__, str(self))
71 70
72 71 def __eq__(self, other):
73 72 try:
74 73 return type(self) == type(other) and self._rev == other._rev
75 74 except AttributeError:
76 75 return False
77 76
78 77 def __ne__(self, other):
79 78 return not (self == other)
80 79
81 80 def __contains__(self, key):
82 81 return key in self._manifest
83 82
84 83 def __getitem__(self, key):
85 84 return self.filectx(key)
86 85
87 86 def __iter__(self):
88 87 return iter(self._manifest)
89 88
90 89 def _buildstatusmanifest(self, status):
91 90 """Builds a manifest that includes the given status results, if this is
92 91 a working copy context. For non-working copy contexts, it just returns
93 92 the normal manifest."""
94 93 return self.manifest()
95 94
96 95 def _matchstatus(self, other, match):
97 96 """This internal method provides a way for child objects to override the
98 97 match operator.
99 98 """
100 99 return match
101 100
102 101 def _buildstatus(self, other, s, match, listignored, listclean,
103 102 listunknown):
104 103 """build a status with respect to another context"""
105 104 # Load earliest manifest first for caching reasons. More specifically,
106 105 # if you have revisions 1000 and 1001, 1001 is probably stored as a
107 106 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
108 107 # 1000 and cache it so that when you read 1001, we just need to apply a
109 108 # delta to what's in the cache. So that's one full reconstruction + one
110 109 # delta application.
111 110 mf2 = None
112 111 if self.rev() is not None and self.rev() < other.rev():
113 112 mf2 = self._buildstatusmanifest(s)
114 113 mf1 = other._buildstatusmanifest(s)
115 114 if mf2 is None:
116 115 mf2 = self._buildstatusmanifest(s)
117 116
118 117 modified, added = [], []
119 118 removed = []
120 119 clean = []
121 120 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
122 121 deletedset = set(deleted)
123 122 d = mf1.diff(mf2, match=match, clean=listclean)
124 123 for fn, value in d.iteritems():
125 124 if fn in deletedset:
126 125 continue
127 126 if value is None:
128 127 clean.append(fn)
129 128 continue
130 129 (node1, flag1), (node2, flag2) = value
131 130 if node1 is None:
132 131 added.append(fn)
133 132 elif node2 is None:
134 133 removed.append(fn)
135 134 elif flag1 != flag2:
136 135 modified.append(fn)
137 136 elif node2 not in wdirfilenodeids:
138 137 # When comparing files between two commits, we save time by
139 138 # not comparing the file contents when the nodeids differ.
140 139 # Note that this means we incorrectly report a reverted change
141 140 # to a file as a modification.
142 141 modified.append(fn)
143 142 elif self[fn].cmp(other[fn]):
144 143 modified.append(fn)
145 144 else:
146 145 clean.append(fn)
147 146
148 147 if removed:
149 148 # need to filter files if they are already reported as removed
150 149 unknown = [fn for fn in unknown if fn not in mf1 and
151 150 (not match or match(fn))]
152 151 ignored = [fn for fn in ignored if fn not in mf1 and
153 152 (not match or match(fn))]
154 153 # if they're deleted, don't report them as removed
155 154 removed = [fn for fn in removed if fn not in deletedset]
156 155
157 156 return scmutil.status(modified, added, removed, deleted, unknown,
158 157 ignored, clean)
159 158
160 159 @propertycache
161 160 def substate(self):
162 161 return subrepoutil.state(self, self._repo.ui)
163 162
164 163 def subrev(self, subpath):
165 164 return self.substate[subpath][1]
166 165
167 166 def rev(self):
168 167 return self._rev
169 168 def node(self):
170 169 return self._node
171 170 def hex(self):
172 171 return hex(self.node())
173 172 def manifest(self):
174 173 return self._manifest
175 174 def manifestctx(self):
176 175 return self._manifestctx
177 176 def repo(self):
178 177 return self._repo
179 178 def phasestr(self):
180 179 return phases.phasenames[self.phase()]
181 180 def mutable(self):
182 181 return self.phase() > phases.public
183 182
184 183 def matchfileset(self, expr, badfn=None):
185 184 return fileset.match(self, expr, badfn=badfn)
186 185
187 186 def obsolete(self):
188 187 """True if the changeset is obsolete"""
189 188 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
190 189
191 190 def extinct(self):
192 191 """True if the changeset is extinct"""
193 192 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
194 193
195 194 def orphan(self):
196 195 """True if the changeset is not obsolete, but its ancestor is"""
197 196 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
198 197
199 198 def phasedivergent(self):
200 199 """True if the changeset tries to be a successor of a public changeset
201 200
202 201 Only non-public and non-obsolete changesets may be phase-divergent.
203 202 """
204 203 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
205 204
206 205 def contentdivergent(self):
207 206 """Is a successor of a changeset with multiple possible successor sets
208 207
209 208 Only non-public and non-obsolete changesets may be content-divergent.
210 209 """
211 210 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
212 211
213 212 def isunstable(self):
214 213 """True if the changeset is either orphan, phase-divergent or
215 214 content-divergent"""
216 215 return self.orphan() or self.phasedivergent() or self.contentdivergent()
217 216
218 217 def instabilities(self):
219 218 """return the list of instabilities affecting this changeset.
220 219
221 220 Instabilities are returned as strings. possible values are:
222 221 - orphan,
223 222 - phase-divergent,
224 223 - content-divergent.
225 224 """
226 225 instabilities = []
227 226 if self.orphan():
228 227 instabilities.append('orphan')
229 228 if self.phasedivergent():
230 229 instabilities.append('phase-divergent')
231 230 if self.contentdivergent():
232 231 instabilities.append('content-divergent')
233 232 return instabilities
234 233
235 234 def parents(self):
236 235 """return contexts for each parent changeset"""
237 236 return self._parents
238 237
239 238 def p1(self):
240 239 return self._parents[0]
241 240
242 241 def p2(self):
243 242 parents = self._parents
244 243 if len(parents) == 2:
245 244 return parents[1]
246 245 return changectx(self._repo, nullrev)
247 246
248 247 def _fileinfo(self, path):
249 248 if r'_manifest' in self.__dict__:
250 249 try:
251 250 return self._manifest[path], self._manifest.flags(path)
252 251 except KeyError:
253 252 raise error.ManifestLookupError(self._node, path,
254 253 _('not found in manifest'))
255 254 if r'_manifestdelta' in self.__dict__ or path in self.files():
256 255 if path in self._manifestdelta:
257 256 return (self._manifestdelta[path],
258 257 self._manifestdelta.flags(path))
259 258 mfl = self._repo.manifestlog
260 259 try:
261 260 node, flag = mfl[self._changeset.manifest].find(path)
262 261 except KeyError:
263 262 raise error.ManifestLookupError(self._node, path,
264 263 _('not found in manifest'))
265 264
266 265 return node, flag
267 266
268 267 def filenode(self, path):
269 268 return self._fileinfo(path)[0]
270 269
271 270 def flags(self, path):
272 271 try:
273 272 return self._fileinfo(path)[1]
274 273 except error.LookupError:
275 274 return ''
276 275
277 276 def sub(self, path, allowcreate=True):
278 277 '''return a subrepo for the stored revision of path, never wdir()'''
279 278 return subrepo.subrepo(self, path, allowcreate=allowcreate)
280 279
281 280 def nullsub(self, path, pctx):
282 281 return subrepo.nullsubrepo(self, path, pctx)
283 282
284 283 def workingsub(self, path):
285 284 '''return a subrepo for the stored revision, or wdir if this is a wdir
286 285 context.
287 286 '''
288 287 return subrepo.subrepo(self, path, allowwdir=True)
289 288
290 289 def match(self, pats=None, include=None, exclude=None, default='glob',
291 290 listsubrepos=False, badfn=None):
292 291 r = self._repo
293 292 return matchmod.match(r.root, r.getcwd(), pats,
294 293 include, exclude, default,
295 294 auditor=r.nofsauditor, ctx=self,
296 295 listsubrepos=listsubrepos, badfn=badfn)
297 296
298 297 def diff(self, ctx2=None, match=None, changes=None, opts=None,
299 298 losedatafn=None, prefix='', relroot='', copy=None,
300 299 hunksfilterfn=None):
301 300 """Returns a diff generator for the given contexts and matcher"""
302 301 if ctx2 is None:
303 302 ctx2 = self.p1()
304 303 if ctx2 is not None:
305 304 ctx2 = self._repo[ctx2]
306 305 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
307 306 opts=opts, losedatafn=losedatafn, prefix=prefix,
308 307 relroot=relroot, copy=copy,
309 308 hunksfilterfn=hunksfilterfn)
310 309
311 310 def dirs(self):
312 311 return self._manifest.dirs()
313 312
314 313 def hasdir(self, dir):
315 314 return self._manifest.hasdir(dir)
316 315
317 316 def status(self, other=None, match=None, listignored=False,
318 317 listclean=False, listunknown=False, listsubrepos=False):
319 318 """return status of files between two nodes or node and working
320 319 directory.
321 320
322 321 If other is None, compare this node with working directory.
323 322
324 323 returns (modified, added, removed, deleted, unknown, ignored, clean)
325 324 """
326 325
327 326 ctx1 = self
328 327 ctx2 = self._repo[other]
329 328
330 329 # This next code block is, admittedly, fragile logic that tests for
331 330 # reversing the contexts and wouldn't need to exist if it weren't for
332 331 # the fast (and common) code path of comparing the working directory
333 332 # with its first parent.
334 333 #
335 334 # What we're aiming for here is the ability to call:
336 335 #
337 336 # workingctx.status(parentctx)
338 337 #
339 338 # If we always built the manifest for each context and compared those,
340 339 # then we'd be done. But the special case of the above call means we
341 340 # just copy the manifest of the parent.
342 341 reversed = False
343 342 if (not isinstance(ctx1, changectx)
344 343 and isinstance(ctx2, changectx)):
345 344 reversed = True
346 345 ctx1, ctx2 = ctx2, ctx1
347 346
348 347 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
349 348 match = ctx2._matchstatus(ctx1, match)
350 349 r = scmutil.status([], [], [], [], [], [], [])
351 350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
352 351 listunknown)
353 352
354 353 if reversed:
355 354 # Reverse added and removed. Clear deleted, unknown and ignored as
356 355 # these make no sense to reverse.
357 356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
358 357 r.clean)
359 358
360 359 if listsubrepos:
361 360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
362 361 try:
363 362 rev2 = ctx2.subrev(subpath)
364 363 except KeyError:
365 364 # A subrepo that existed in node1 was deleted between
366 365 # node1 and node2 (inclusive). Thus, ctx2's substate
367 366 # won't contain that subpath. The best we can do is ignore it.
368 367 rev2 = None
369 368 submatch = matchmod.subdirmatcher(subpath, match)
370 369 s = sub.status(rev2, match=submatch, ignored=listignored,
371 370 clean=listclean, unknown=listunknown,
372 371 listsubrepos=True)
373 372 for rfiles, sfiles in zip(r, s):
374 373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
375 374
376 375 narrowmatch = self._repo.narrowmatch()
377 376 if not narrowmatch.always():
378 377 for l in r:
379 378 l[:] = list(filter(narrowmatch, l))
380 379 for l in r:
381 380 l.sort()
382 381
383 382 return r
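# Illustrative sketch, assuming a localrepo handle `repo` is already open:
# the status() method above is usually reached through context objects, e.g.
#
#     st = repo[b'tip'].status(b'.')   # tip vs. the working dir's parent
#     print(st.modified, st.added, st.removed)
#
# which returns the scmutil.status tuple described in the docstring:
# (modified, added, removed, deleted, unknown, ignored, clean).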
384 383
385 384 class changectx(basectx):
386 385 """A changecontext object makes access to data related to a particular
387 386 changeset convenient. It represents a read-only context already present in
388 387 the repo."""
389 388 def __init__(self, repo, changeid='.'):
390 389 """changeid is a revision number, node, or tag"""
391 390 super(changectx, self).__init__(repo)
392 391
393 392 try:
394 393 if isinstance(changeid, int):
395 394 self._node = repo.changelog.node(changeid)
396 395 self._rev = changeid
397 396 return
398 397 elif changeid == 'null':
399 398 self._node = nullid
400 399 self._rev = nullrev
401 400 return
402 401 elif changeid == 'tip':
403 402 self._node = repo.changelog.tip()
404 403 self._rev = repo.changelog.rev(self._node)
405 404 return
406 405 elif (changeid == '.'
407 406 or repo.local() and changeid == repo.dirstate.p1()):
408 407 # this is a hack to delay/avoid loading obsmarkers
409 408 # when we know that '.' won't be hidden
410 409 self._node = repo.dirstate.p1()
411 410 self._rev = repo.unfiltered().changelog.rev(self._node)
412 411 return
413 412 elif len(changeid) == 20:
414 413 try:
415 414 self._node = changeid
416 415 self._rev = repo.changelog.rev(changeid)
417 416 return
418 417 except error.FilteredLookupError:
419 418 changeid = hex(changeid) # for the error message
420 419 raise
421 420 except LookupError:
422 421 # check if it might have come from damaged dirstate
423 422 #
424 423 # XXX we could avoid the unfiltered if we had a recognizable
425 424 # exception for filtered changeset access
426 425 if (repo.local()
427 426 and changeid in repo.unfiltered().dirstate.parents()):
428 427 msg = _("working directory has unknown parent '%s'!")
429 428 raise error.Abort(msg % short(changeid))
430 429 changeid = hex(changeid) # for the error message
431 430
432 431 elif len(changeid) == 40:
433 432 try:
434 433 self._node = bin(changeid)
435 434 self._rev = repo.changelog.rev(self._node)
436 435 return
437 436 except error.FilteredLookupError:
438 437 raise
439 438 except (TypeError, LookupError):
440 439 pass
441 440 else:
442 441 raise error.ProgrammingError(
443 442 "unsupported changeid '%s' of type %s" %
444 443 (changeid, type(changeid)))
445 444
446 445 except (error.FilteredIndexError, error.FilteredLookupError):
447 446 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
448 447 % pycompat.bytestr(changeid))
449 448 except error.FilteredRepoLookupError:
450 449 raise
451 450 except IndexError:
452 451 pass
453 452 raise error.RepoLookupError(
454 453 _("unknown revision '%s'") % changeid)
455 454
456 455 def __hash__(self):
457 456 try:
458 457 return hash(self._rev)
459 458 except AttributeError:
460 459 return id(self)
461 460
462 461 def __nonzero__(self):
463 462 return self._rev != nullrev
464 463
465 464 __bool__ = __nonzero__
466 465
467 466 @propertycache
468 467 def _changeset(self):
469 468 return self._repo.changelog.changelogrevision(self.rev())
470 469
471 470 @propertycache
472 471 def _manifest(self):
473 472 return self._manifestctx.read()
474 473
475 474 @property
476 475 def _manifestctx(self):
477 476 return self._repo.manifestlog[self._changeset.manifest]
478 477
479 478 @propertycache
480 479 def _manifestdelta(self):
481 480 return self._manifestctx.readdelta()
482 481
483 482 @propertycache
484 483 def _parents(self):
485 484 repo = self._repo
486 485 p1, p2 = repo.changelog.parentrevs(self._rev)
487 486 if p2 == nullrev:
488 487 return [changectx(repo, p1)]
489 488 return [changectx(repo, p1), changectx(repo, p2)]
490 489
491 490 def changeset(self):
492 491 c = self._changeset
493 492 return (
494 493 c.manifest,
495 494 c.user,
496 495 c.date,
497 496 c.files,
498 497 c.description,
499 498 c.extra,
500 499 )
501 500 def manifestnode(self):
502 501 return self._changeset.manifest
503 502
504 503 def user(self):
505 504 return self._changeset.user
506 505 def date(self):
507 506 return self._changeset.date
508 507 def files(self):
509 508 return self._changeset.files
510 509 def description(self):
511 510 return self._changeset.description
512 511 def branch(self):
513 512 return encoding.tolocal(self._changeset.extra.get("branch"))
514 513 def closesbranch(self):
515 514 return 'close' in self._changeset.extra
516 515 def extra(self):
517 516 """Return a dict of extra information."""
518 517 return self._changeset.extra
519 518 def tags(self):
520 519 """Return a list of byte tag names"""
521 520 return self._repo.nodetags(self._node)
522 521 def bookmarks(self):
523 522 """Return a list of byte bookmark names."""
524 523 return self._repo.nodebookmarks(self._node)
525 524 def phase(self):
526 525 return self._repo._phasecache.phase(self._repo, self._rev)
527 526 def hidden(self):
528 527 return self._rev in repoview.filterrevs(self._repo, 'visible')
529 528
530 529 def isinmemory(self):
531 530 return False
532 531
533 532 def children(self):
534 533 """return list of changectx contexts for each child changeset.
535 534
536 535 This returns only the immediate child changesets. Use descendants() to
537 536 recursively walk children.
538 537 """
539 538 c = self._repo.changelog.children(self._node)
540 539 return [changectx(self._repo, x) for x in c]
541 540
542 541 def ancestors(self):
543 542 for a in self._repo.changelog.ancestors([self._rev]):
544 543 yield changectx(self._repo, a)
545 544
546 545 def descendants(self):
547 546 """Recursively yield all children of the changeset.
548 547
549 548 For just the immediate children, use children()
550 549 """
551 550 for d in self._repo.changelog.descendants([self._rev]):
552 551 yield changectx(self._repo, d)
553 552
554 553 def filectx(self, path, fileid=None, filelog=None):
555 554 """get a file context from this changeset"""
556 555 if fileid is None:
557 556 fileid = self.filenode(path)
558 557 return filectx(self._repo, path, fileid=fileid,
559 558 changectx=self, filelog=filelog)
560 559
561 560 def ancestor(self, c2, warn=False):
562 561 """return the "best" ancestor context of self and c2
563 562
564 563 If there are multiple candidates, it will show a message and check
565 564 merge.preferancestor configuration before falling back to the
566 565 revlog ancestor."""
567 566 # deal with workingctxs
568 567 n2 = c2._node
569 568 if n2 is None:
570 569 n2 = c2._parents[0]._node
571 570 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
572 571 if not cahs:
573 572 anc = nullid
574 573 elif len(cahs) == 1:
575 574 anc = cahs[0]
576 575 else:
577 576 # experimental config: merge.preferancestor
578 577 for r in self._repo.ui.configlist('merge', 'preferancestor'):
579 578 try:
580 579 ctx = scmutil.revsymbol(self._repo, r)
581 580 except error.RepoLookupError:
582 581 continue
583 582 anc = ctx.node()
584 583 if anc in cahs:
585 584 break
586 585 else:
587 586 anc = self._repo.changelog.ancestor(self._node, n2)
588 587 if warn:
589 588 self._repo.ui.status(
590 589 (_("note: using %s as ancestor of %s and %s\n") %
591 590 (short(anc), short(self._node), short(n2))) +
592 591 ''.join(_(" alternatively, use --config "
593 592 "merge.preferancestor=%s\n") %
594 593 short(n) for n in sorted(cahs) if n != anc))
595 594 return changectx(self._repo, anc)
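# Note (illustrative): when several common-ancestor heads exist, the choice
# made above can be steered from the command line exactly as the status
# message printed here suggests, e.g.
#
#     hg merge --config merge.preferancestor=<rev>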
596 595
597 596 def isancestorof(self, other):
598 597 """True if this changeset is an ancestor of other"""
599 598 return self._repo.changelog.isancestorrev(self._rev, other._rev)
600 599
601 600 def walk(self, match):
602 601 '''Generates matching file names.'''
603 602
604 603 # Wrap match.bad method to have message with nodeid
605 604 def bad(fn, msg):
606 605 # The manifest doesn't know about subrepos, so don't complain about
607 606 # paths into valid subrepos.
608 607 if any(fn == s or fn.startswith(s + '/')
609 608 for s in self.substate):
610 609 return
611 610 match.bad(fn, _('no such file in rev %s') % self)
612 611
613 612 m = matchmod.badmatch(match, bad)
614 613 return self._manifest.walk(m)
615 614
616 615 def matches(self, match):
617 616 return self.walk(match)
618 617
619 618 class basefilectx(object):
620 619 """A filecontext object represents the common logic for its children:
621 620 filectx: read-only access to a filerevision that is already present
622 621 in the repo,
623 622 workingfilectx: a filecontext that represents files from the working
624 623 directory,
625 624 memfilectx: a filecontext that represents files in-memory,
626 625 """
627 626 @propertycache
628 627 def _filelog(self):
629 628 return self._repo.file(self._path)
630 629
631 630 @propertycache
632 631 def _changeid(self):
633 632 if r'_changeid' in self.__dict__:
634 633 return self._changeid
635 634 elif r'_changectx' in self.__dict__:
636 635 return self._changectx.rev()
637 636 elif r'_descendantrev' in self.__dict__:
638 637 # this file context was created from a revision with a known
639 638 # descendant, we can (lazily) correct for linkrev aliases
640 639 return self._adjustlinkrev(self._descendantrev)
641 640 else:
642 641 return self._filelog.linkrev(self._filerev)
643 642
644 643 @propertycache
645 644 def _filenode(self):
646 645 if r'_fileid' in self.__dict__:
647 646 return self._filelog.lookup(self._fileid)
648 647 else:
649 648 return self._changectx.filenode(self._path)
650 649
651 650 @propertycache
652 651 def _filerev(self):
653 652 return self._filelog.rev(self._filenode)
654 653
655 654 @propertycache
656 655 def _repopath(self):
657 656 return self._path
658 657
659 658 def __nonzero__(self):
660 659 try:
661 660 self._filenode
662 661 return True
663 662 except error.LookupError:
664 663 # file is missing
665 664 return False
666 665
667 666 __bool__ = __nonzero__
668 667
669 668 def __bytes__(self):
670 669 try:
671 670 return "%s@%s" % (self.path(), self._changectx)
672 671 except error.LookupError:
673 672 return "%s@???" % self.path()
674 673
675 674 __str__ = encoding.strmethod(__bytes__)
676 675
677 676 def __repr__(self):
678 677 return r"<%s %s>" % (type(self).__name__, str(self))
679 678
680 679 def __hash__(self):
681 680 try:
682 681 return hash((self._path, self._filenode))
683 682 except AttributeError:
684 683 return id(self)
685 684
686 685 def __eq__(self, other):
687 686 try:
688 687 return (type(self) == type(other) and self._path == other._path
689 688 and self._filenode == other._filenode)
690 689 except AttributeError:
691 690 return False
692 691
693 692 def __ne__(self, other):
694 693 return not (self == other)
695 694
696 695 def filerev(self):
697 696 return self._filerev
698 697 def filenode(self):
699 698 return self._filenode
700 699 @propertycache
701 700 def _flags(self):
702 701 return self._changectx.flags(self._path)
703 702 def flags(self):
704 703 return self._flags
705 704 def filelog(self):
706 705 return self._filelog
707 706 def rev(self):
708 707 return self._changeid
709 708 def linkrev(self):
710 709 return self._filelog.linkrev(self._filerev)
711 710 def node(self):
712 711 return self._changectx.node()
713 712 def hex(self):
714 713 return self._changectx.hex()
715 714 def user(self):
716 715 return self._changectx.user()
717 716 def date(self):
718 717 return self._changectx.date()
719 718 def files(self):
720 719 return self._changectx.files()
721 720 def description(self):
722 721 return self._changectx.description()
723 722 def branch(self):
724 723 return self._changectx.branch()
725 724 def extra(self):
726 725 return self._changectx.extra()
727 726 def phase(self):
728 727 return self._changectx.phase()
729 728 def phasestr(self):
730 729 return self._changectx.phasestr()
731 730 def obsolete(self):
732 731 return self._changectx.obsolete()
733 732 def instabilities(self):
734 733 return self._changectx.instabilities()
735 734 def manifest(self):
736 735 return self._changectx.manifest()
737 736 def changectx(self):
738 737 return self._changectx
739 738 def renamed(self):
740 739 return self._copied
741 740 def repo(self):
742 741 return self._repo
743 742 def size(self):
744 743 return len(self.data())
745 744
746 745 def path(self):
747 746 return self._path
748 747
749 748 def isbinary(self):
750 749 try:
751 750 return stringutil.binary(self.data())
752 751 except IOError:
753 752 return False
754 753 def isexec(self):
755 754 return 'x' in self.flags()
756 755 def islink(self):
757 756 return 'l' in self.flags()
758 757
759 758 def isabsent(self):
760 759 """whether this filectx represents a file not in self._changectx
761 760
762 761 This is mainly for merge code to detect change/delete conflicts. This is
763 762 expected to be True for all subclasses of basectx."""
764 763 return False
765 764
766 765 _customcmp = False
767 766 def cmp(self, fctx):
768 767 """compare with other file context
769 768
770 769 returns True if different than fctx.
771 770 """
772 771 if fctx._customcmp:
773 772 return fctx.cmp(self)
774 773
775 774 if (fctx._filenode is None
776 775 and (self._repo._encodefilterpats
777 776 # if file data starts with '\1\n', empty metadata block is
778 777 # prepended, which adds 4 bytes to filelog.size().
779 778 or self.size() - 4 == fctx.size())
780 779 or self.size() == fctx.size()):
781 780 return self._filelog.cmp(self._filenode, fctx.data())
782 781
783 782 return True
784 783
785 784 def _adjustlinkrev(self, srcrev, inclusive=False):
786 785 """return the first ancestor of <srcrev> introducing <fnode>
787 786
788 787 If the linkrev of the file revision does not point to an ancestor of
789 788 srcrev, we'll walk down the ancestors until we find one introducing
790 789 this file revision.
791 790
792 791 :srcrev: the changeset revision we search ancestors from
793 792 :inclusive: if true, the src revision will also be checked
794 793 """
795 794 repo = self._repo
796 795 cl = repo.unfiltered().changelog
797 796 mfl = repo.manifestlog
798 797 # fetch the linkrev
799 798 lkr = self.linkrev()
800 799 # hack to reuse ancestor computation when searching for renames
801 800 memberanc = getattr(self, '_ancestrycontext', None)
802 801 iteranc = None
803 802 if srcrev is None:
804 803 # wctx case, used by workingfilectx during mergecopy
805 804 revs = [p.rev() for p in self._repo[None].parents()]
806 805 inclusive = True # we skipped the real (revless) source
807 806 else:
808 807 revs = [srcrev]
809 808 if memberanc is None:
810 809 memberanc = iteranc = cl.ancestors(revs, lkr,
811 810 inclusive=inclusive)
812 811 # check if this linkrev is an ancestor of srcrev
813 812 if lkr not in memberanc:
814 813 if iteranc is None:
815 814 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
816 815 fnode = self._filenode
817 816 path = self._path
818 817 for a in iteranc:
819 818 ac = cl.read(a) # get changeset data (we avoid object creation)
820 819 if path in ac[3]: # checking the 'files' field.
821 820 # The file has been touched, check if the content is
822 821 # similar to the one we search for.
823 822 if fnode == mfl[ac[0]].readfast().get(path):
824 823 return a
825 824 # In theory, we should never get out of that loop without a result.
826 825 # But if the manifest uses a buggy file revision (not a child of the
827 826 # one it replaces) we could. Such a buggy situation will likely
828 827 # result in a crash somewhere else at some point.
829 828 return lkr
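# Illustration of the linkrev-shadowing handled above (hypothetical history):
# if two changesets introduce byte-identical content for the same file, the
# filelog stores that file revision once, and its linkrev points at whichever
# changeset was added first. A filectx reached from the other changeset's
# descendants must therefore walk those ancestors (the loop above) to find
# the changeset that really introduced the content there, rather than
# trusting linkrev blindly.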
830 829
831 830 def introrev(self):
832 831 """return the rev of the changeset which introduced this file revision
833 832
834 833 This method is different from linkrev because it takes into account the
835 834 changeset the filectx was created from. It ensures the returned
836 835 revision is one of its ancestors. This prevents bugs from
837 836 'linkrev-shadowing' when a file revision is used by multiple
838 837 changesets.
839 838 """
840 839 lkr = self.linkrev()
841 840 attrs = vars(self)
842 841 noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
843 842 if noctx or self.rev() == lkr:
844 843 return self.linkrev()
845 844 return self._adjustlinkrev(self.rev(), inclusive=True)
846 845
847 846 def introfilectx(self):
848 847 """Return filectx having identical contents, but pointing to the
849 848 changeset revision where this filectx was introduced"""
850 849 introrev = self.introrev()
851 850 if self.rev() == introrev:
852 851 return self
853 852 return self.filectx(self.filenode(), changeid=introrev)
854 853
855 854 def _parentfilectx(self, path, fileid, filelog):
856 855 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
857 856 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
858 857 if r'_changeid' in vars(self) or r'_changectx' in vars(self):
859 858 # If self is associated with a changeset (probably explicitly
860 859 # fed), ensure the created filectx is associated with a
861 860 # changeset that is an ancestor of self.changectx.
862 861 # This lets us later use _adjustlinkrev to get a correct link.
863 862 fctx._descendantrev = self.rev()
864 863 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
865 864 elif r'_descendantrev' in vars(self):
866 865 # Otherwise propagate _descendantrev if we have one associated.
867 866 fctx._descendantrev = self._descendantrev
868 867 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
869 868 return fctx
870 869
871 870 def parents(self):
872 871 _path = self._path
873 872 fl = self._filelog
874 873 parents = self._filelog.parents(self._filenode)
875 874 pl = [(_path, node, fl) for node in parents if node != nullid]
876 875
877 876 r = fl.renamed(self._filenode)
878 877 if r:
879 878 # - In the simple rename case, both parents are nullid, pl is empty.
880 879 # - In case of merge, only one of the parents is nullid and should
881 880 # be replaced with the rename information. This parent is -always-
882 881 # the first one.
883 882 #
884 883 # As nullid parents have always been filtered out in the previous list
885 884 # comprehension, inserting at 0 will always result in replacing the
886 885 # first nullid parent with the rename information.
887 886 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
888 887
889 888 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
890 889
891 890 def p1(self):
892 891 return self.parents()[0]
893 892
894 893 def p2(self):
895 894 p = self.parents()
896 895 if len(p) == 2:
897 896 return p[1]
898 897 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
899 898
900 899 def annotate(self, follow=False, skiprevs=None, diffopts=None):
901 900 """Returns a list of annotateline objects for each line in the file
902 901
903 902 - line.fctx is the filectx of the node where that line was last changed
904 903 - line.lineno is the line number at the first appearance in the managed
905 904 file
906 905 - line.text is the data on that line (including newline character)
907 906 """
908 907 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
909 908
910 909 def parents(f):
911 910 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
912 911 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
913 912 # from the topmost introrev (= srcrev) down to p.linkrev() if it
914 913 # isn't an ancestor of the srcrev.
915 914 f._changeid
916 915 pl = f.parents()
917 916
918 917 # Don't return renamed parents if we aren't following.
919 918 if not follow:
920 919 pl = [p for p in pl if p.path() == f.path()]
921 920
922 921 # renamed filectx won't have a filelog yet, so set it
923 922 # from the cache to save time
924 923 for p in pl:
925 924 if not r'_filelog' in p.__dict__:
926 925 p._filelog = getlog(p.path())
927 926
928 927 return pl
929 928
930 929 # use linkrev to find the first changeset where self appeared
931 930 base = self.introfilectx()
932 931 if getattr(base, '_ancestrycontext', None) is None:
933 932 cl = self._repo.changelog
934 933 if base.rev() is None:
935 934 # wctx is not inclusive, but works because _ancestrycontext
936 935 # is used to test filelog revisions
937 936 ac = cl.ancestors([p.rev() for p in base.parents()],
938 937 inclusive=True)
939 938 else:
940 939 ac = cl.ancestors([base.rev()], inclusive=True)
941 940 base._ancestrycontext = ac
942 941
943 942 return dagop.annotate(base, parents, skiprevs=skiprevs,
944 943 diffopts=diffopts)
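# Illustrative sketch, assuming `fctx` is any filectx for a tracked file:
# consuming the annotate() result documented above could look like
#
#     for line in fctx.annotate(follow=True):
#         print(line.fctx.rev(), line.lineno, line.text)
#
# where each item carries the fields described in the docstring.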
945 944
946 945 def ancestors(self, followfirst=False):
947 946 visit = {}
948 947 c = self
949 948 if followfirst:
950 949 cut = 1
951 950 else:
952 951 cut = None
953 952
954 953 while True:
955 954 for parent in c.parents()[:cut]:
956 955 visit[(parent.linkrev(), parent.filenode())] = parent
957 956 if not visit:
958 957 break
959 958 c = visit.pop(max(visit))
960 959 yield c
961 960
962 961 def decodeddata(self):
963 962 """Returns `data()` after running repository decoding filters.
964 963
965 964 This is often equivalent to how the data would be expressed on disk.
966 965 """
967 966 return self._repo.wwritedata(self.path(), self.data())
968 967
969 968 class filectx(basefilectx):
970 969 """A filecontext object makes access to data related to a particular
971 970 filerevision convenient."""
972 971 def __init__(self, repo, path, changeid=None, fileid=None,
973 972 filelog=None, changectx=None):
974 973 """changeid can be a changeset revision, node, or tag.
975 974 fileid can be a file revision or node."""
976 975 self._repo = repo
977 976 self._path = path
978 977
979 978 assert (changeid is not None
980 979 or fileid is not None
981 980 or changectx is not None), \
982 981 ("bad args: changeid=%r, fileid=%r, changectx=%r"
983 982 % (changeid, fileid, changectx))
984 983
985 984 if filelog is not None:
986 985 self._filelog = filelog
987 986
988 987 if changeid is not None:
989 988 self._changeid = changeid
990 989 if changectx is not None:
991 990 self._changectx = changectx
992 991 if fileid is not None:
993 992 self._fileid = fileid
994 993
995 994 @propertycache
996 995 def _changectx(self):
997 996 try:
998 997 return changectx(self._repo, self._changeid)
999 998 except error.FilteredRepoLookupError:
1000 999 # Linkrev may point to any revision in the repository. When the
1001 1000 # repository is filtered this may lead to `filectx` trying to build
1002 1001 # `changectx` for filtered revision. In such case we fallback to
1003 1002 # creating `changectx` on the unfiltered version of the repository.
1004 1003 # This fallback should not be an issue because `changectx` from
1005 1004 # `filectx` are not used in complex operations that care about
1006 1005 # filtering.
1007 1006 #
1008 1007 # This fallback is a cheap and dirty fix that prevents several
1009 1008 # crashes. It does not ensure the behavior is correct. However the
1010 1009 # behavior was not correct before filtering either and "incorrect
1011 1010 # behavior" is seen as better as "crash"
1012 1011 #
1013 1012 # Linkrevs have several serious problems with filtering that are
1014 1013 # complicated to solve. Proper handling of the issue here should be
1015 1014 # considered once solving the linkrev issues is on the table.
1016 1015 return changectx(self._repo.unfiltered(), self._changeid)
1017 1016
1018 1017 def filectx(self, fileid, changeid=None):
1019 1018 '''opens an arbitrary revision of the file without
1020 1019 opening a new filelog'''
1021 1020 return filectx(self._repo, self._path, fileid=fileid,
1022 1021 filelog=self._filelog, changeid=changeid)
1023 1022
1024 1023 def rawdata(self):
1025 1024 return self._filelog.revision(self._filenode, raw=True)
1026 1025
1027 1026 def rawflags(self):
1028 1027 """low-level revlog flags"""
1029 1028 return self._filelog.flags(self._filerev)
1030 1029
1031 1030 def data(self):
1032 1031 try:
1033 1032 return self._filelog.read(self._filenode)
1034 1033 except error.CensoredNodeError:
1035 1034 if self._repo.ui.config("censor", "policy") == "ignore":
1036 1035 return ""
1037 1036 raise error.Abort(_("censored node: %s") % short(self._filenode),
1038 1037 hint=_("set censor.policy to ignore errors"))
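# Note (illustrative): the censorship fallback above is configuration-driven;
# running a command with --config censor.policy=ignore makes reads of
# censored file revisions return empty data instead of aborting.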
1039 1038
1040 1039 def size(self):
1041 1040 return self._filelog.size(self._filerev)
1042 1041
1043 1042 @propertycache
1044 1043 def _copied(self):
1045 1044 """check if file was actually renamed in this changeset revision
1046 1045
1047 1046 If a rename is logged in the file revision, we report the copy for the
1048 1047 changeset only if the file revision's linkrev points back to the changeset
1049 1048 in question or both changeset parents contain different file revisions.
1050 1049 """
1051 1050
1052 1051 renamed = self._filelog.renamed(self._filenode)
1053 1052 if not renamed:
1054 1053 return None
1055 1054
1056 1055 if self.rev() == self.linkrev():
1057 1056 return renamed
1058 1057
1059 1058 name = self.path()
1060 1059 fnode = self._filenode
1061 1060 for p in self._changectx.parents():
1062 1061 try:
1063 1062 if fnode == p.filenode(name):
1064 1063 return None
1065 1064 except error.LookupError:
1066 1065 pass
1067 1066 return renamed
1068 1067
1069 1068 def children(self):
1070 1069 # hard for renames
1071 1070 c = self._filelog.children(self._filenode)
1072 1071 return [filectx(self._repo, self._path, fileid=x,
1073 1072 filelog=self._filelog) for x in c]
1074 1073
1075 1074 class committablectx(basectx):
1076 1075 """A committablectx object provides common functionality for a context that
1077 1076 wants the ability to commit, e.g. workingctx or memctx."""
1078 1077 def __init__(self, repo, text="", user=None, date=None, extra=None,
1079 1078 changes=None):
1080 1079 super(committablectx, self).__init__(repo)
1081 1080 self._rev = None
1082 1081 self._node = None
1083 1082 self._text = text
1084 1083 if date:
1085 1084 self._date = dateutil.parsedate(date)
1086 1085 if user:
1087 1086 self._user = user
1088 1087 if changes:
1089 1088 self._status = changes
1090 1089
1091 1090 self._extra = {}
1092 1091 if extra:
1093 1092 self._extra = extra.copy()
1094 1093 if 'branch' not in self._extra:
1095 1094 try:
1096 1095 branch = encoding.fromlocal(self._repo.dirstate.branch())
1097 1096 except UnicodeDecodeError:
1098 1097 raise error.Abort(_('branch name not in UTF-8!'))
1099 1098 self._extra['branch'] = branch
1100 1099 if self._extra['branch'] == '':
1101 1100 self._extra['branch'] = 'default'
1102 1101
1103 1102 def __bytes__(self):
1104 1103 return bytes(self._parents[0]) + "+"
1105 1104
1106 1105 __str__ = encoding.strmethod(__bytes__)
1107 1106
1108 1107 def __nonzero__(self):
1109 1108 return True
1110 1109
1111 1110 __bool__ = __nonzero__
1112 1111
1113 1112 def _buildflagfunc(self):
1114 1113 # Create a fallback function for getting file flags when the
1115 1114 # filesystem doesn't support them
1116 1115
1117 1116 copiesget = self._repo.dirstate.copies().get
1118 1117 parents = self.parents()
1119 1118 if len(parents) < 2:
1120 1119 # when we have one parent, it's easy: copy from parent
1121 1120 man = parents[0].manifest()
1122 1121 def func(f):
1123 1122 f = copiesget(f, f)
1124 1123 return man.flags(f)
1125 1124 else:
1126 1125 # merges are tricky: we try to reconstruct the unstored
1127 1126 # result from the merge (issue1802)
1128 1127 p1, p2 = parents
1129 1128 pa = p1.ancestor(p2)
1130 1129 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1131 1130
1132 1131 def func(f):
1133 1132 f = copiesget(f, f) # may be wrong for merges with copies
1134 1133 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1135 1134 if fl1 == fl2:
1136 1135 return fl1
1137 1136 if fl1 == fla:
1138 1137 return fl2
1139 1138 if fl2 == fla:
1140 1139 return fl1
1141 1140 return '' # punt for conflicts
1142 1141
1143 1142 return func
1144 1143
1145 1144 @propertycache
1146 1145 def _flagfunc(self):
1147 1146 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1148 1147
1149 1148 @propertycache
1150 1149 def _status(self):
1151 1150 return self._repo.status()
1152 1151
1153 1152 @propertycache
1154 1153 def _user(self):
1155 1154 return self._repo.ui.username()
1156 1155
1157 1156 @propertycache
1158 1157 def _date(self):
1159 1158 ui = self._repo.ui
1160 1159 date = ui.configdate('devel', 'default-date')
1161 1160 if date is None:
1162 1161 date = dateutil.makedate()
1163 1162 return date
1164 1163
1165 1164 def subrev(self, subpath):
1166 1165 return None
1167 1166
1168 1167 def manifestnode(self):
1169 1168 return None
1170 1169 def user(self):
1171 1170 return self._user or self._repo.ui.username()
1172 1171 def date(self):
1173 1172 return self._date
1174 1173 def description(self):
1175 1174 return self._text
1176 1175 def files(self):
1177 1176 return sorted(self._status.modified + self._status.added +
1178 1177 self._status.removed)
1179 1178
1180 1179 def modified(self):
1181 1180 return self._status.modified
1182 1181 def added(self):
1183 1182 return self._status.added
1184 1183 def removed(self):
1185 1184 return self._status.removed
1186 1185 def deleted(self):
1187 1186 return self._status.deleted
1188 1187 def branch(self):
1189 1188 return encoding.tolocal(self._extra['branch'])
1190 1189 def closesbranch(self):
1191 1190 return 'close' in self._extra
1192 1191 def extra(self):
1193 1192 return self._extra
1194 1193
1195 1194 def isinmemory(self):
1196 1195 return False
1197 1196
1198 1197 def tags(self):
1199 1198 return []
1200 1199
1201 1200 def bookmarks(self):
1202 1201 b = []
1203 1202 for p in self.parents():
1204 1203 b.extend(p.bookmarks())
1205 1204 return b
1206 1205
1207 1206 def phase(self):
1208 1207 phase = phases.draft # default phase to draft
1209 1208 for p in self.parents():
1210 1209 phase = max(phase, p.phase())
1211 1210 return phase
1212 1211
1213 1212 def hidden(self):
1214 1213 return False
1215 1214
1216 1215 def children(self):
1217 1216 return []
1218 1217
1219 1218 def flags(self, path):
1220 1219 if r'_manifest' in self.__dict__:
1221 1220 try:
1222 1221 return self._manifest.flags(path)
1223 1222 except KeyError:
1224 1223 return ''
1225 1224
1226 1225 try:
1227 1226 return self._flagfunc(path)
1228 1227 except OSError:
1229 1228 return ''
1230 1229
1231 1230 def ancestor(self, c2):
1232 1231 """return the "best" ancestor context of self and c2"""
1233 1232 return self._parents[0].ancestor(c2) # punt on two parents for now
1234 1233
1235 1234 def walk(self, match):
1236 1235 '''Generates matching file names.'''
1237 1236 return sorted(self._repo.dirstate.walk(match,
1238 1237 subrepos=sorted(self.substate),
1239 1238 unknown=True, ignored=False))
1240 1239
1241 1240 def matches(self, match):
1242 1241 ds = self._repo.dirstate
1243 1242 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1244 1243
1245 1244 def ancestors(self):
1246 1245 for p in self._parents:
1247 1246 yield p
1248 1247 for a in self._repo.changelog.ancestors(
1249 1248 [p.rev() for p in self._parents]):
1250 1249 yield changectx(self._repo, a)
1251 1250
1252 1251 def markcommitted(self, node):
1253 1252 """Perform post-commit cleanup necessary after committing this ctx
1254 1253
1255 1254 Specifically, this updates backing stores this working context
1256 1255 wraps to reflect the fact that the changes reflected by this
1257 1256 workingctx have been committed. For example, it marks
1258 1257 modified and added files as normal in the dirstate.
1259 1258
1260 1259 """
1261 1260
1262 1261 with self._repo.dirstate.parentchange():
1263 1262 for f in self.modified() + self.added():
1264 1263 self._repo.dirstate.normal(f)
1265 1264 for f in self.removed():
1266 1265 self._repo.dirstate.drop(f)
1267 1266 self._repo.dirstate.setparents(node)
1268 1267
1269 1268 # write changes out explicitly, because nesting wlock at
1270 1269 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1271 1270 # from immediately doing so for subsequent changing files
1272 1271 self._repo.dirstate.write(self._repo.currenttransaction())
1273 1272
1274 1273 def dirty(self, missing=False, merge=True, branch=True):
1275 1274 return False
1276 1275
1277 1276 class workingctx(committablectx):
1278 1277 """A workingctx object makes access to data related to
1279 1278 the current working directory convenient.
1280 1279 date - any valid date string or (unixtime, offset), or None.
1281 1280 user - username string, or None.
1282 1281 extra - a dictionary of extra values, or None.
1283 1282 changes - a list of file lists as returned by localrepo.status()
1284 1283 or None to use the repository status.
1285 1284 """
1286 1285 def __init__(self, repo, text="", user=None, date=None, extra=None,
1287 1286 changes=None):
1288 1287 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1289 1288
1290 1289 def __iter__(self):
1291 1290 d = self._repo.dirstate
1292 1291 for f in d:
1293 1292 if d[f] != 'r':
1294 1293 yield f
1295 1294
1296 1295 def __contains__(self, key):
1297 1296 return self._repo.dirstate[key] not in "?r"
1298 1297
1299 1298 def hex(self):
1300 1299 return hex(wdirid)
1301 1300
1302 1301 @propertycache
1303 1302 def _parents(self):
1304 1303 p = self._repo.dirstate.parents()
1305 1304 if p[1] == nullid:
1306 1305 p = p[:-1]
1307 1306 return [changectx(self._repo, x) for x in p]
1308 1307
1309 1308 def _fileinfo(self, path):
1310 1309 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1311 1310 self._manifest
1312 1311 return super(workingctx, self)._fileinfo(path)
1313 1312
1314 1313 def filectx(self, path, filelog=None):
1315 1314 """get a file context from the working directory"""
1316 1315 return workingfilectx(self._repo, path, workingctx=self,
1317 1316 filelog=filelog)
1318 1317
1319 1318 def dirty(self, missing=False, merge=True, branch=True):
1320 1319 "check whether a working directory is modified"
1321 1320 # check subrepos first
1322 1321 for s in sorted(self.substate):
1323 1322 if self.sub(s).dirty(missing=missing):
1324 1323 return True
1325 1324 # check current working dir
1326 1325 return ((merge and self.p2()) or
1327 1326 (branch and self.branch() != self.p1().branch()) or
1328 1327 self.modified() or self.added() or self.removed() or
1329 1328 (missing and self.deleted()))
1330 1329
1331 1330 def add(self, list, prefix=""):
1332 1331 with self._repo.wlock():
1333 1332 ui, ds = self._repo.ui, self._repo.dirstate
1334 1333 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1335 1334 rejected = []
1336 1335 lstat = self._repo.wvfs.lstat
1337 1336 for f in list:
1338 1337 # ds.pathto() returns an absolute path when this is invoked from
1339 1338 # the keyword extension. That gets flagged as non-portable on
1340 1339 # Windows, since it contains the drive letter and colon.
1341 1340 scmutil.checkportable(ui, os.path.join(prefix, f))
1342 1341 try:
1343 1342 st = lstat(f)
1344 1343 except OSError:
1345 1344 ui.warn(_("%s does not exist!\n") % uipath(f))
1346 1345 rejected.append(f)
1347 1346 continue
1348 1347 limit = ui.configbytes('ui', 'large-file-limit')
1349 1348 if limit != 0 and st.st_size > limit:
1350 1349 ui.warn(_("%s: up to %d MB of RAM may be required "
1351 1350 "to manage this file\n"
1352 1351 "(use 'hg revert %s' to cancel the "
1353 1352 "pending addition)\n")
1354 1353 % (f, 3 * st.st_size // 1000000, uipath(f)))
1355 1354 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1356 1355 ui.warn(_("%s not added: only files and symlinks "
1357 1356 "supported currently\n") % uipath(f))
1358 1357 rejected.append(f)
1359 1358 elif ds[f] in 'amn':
1360 1359 ui.warn(_("%s already tracked!\n") % uipath(f))
1361 1360 elif ds[f] == 'r':
1362 1361 ds.normallookup(f)
1363 1362 else:
1364 1363 ds.add(f)
1365 1364 return rejected
1366 1365
1367 1366 def forget(self, files, prefix=""):
1368 1367 with self._repo.wlock():
1369 1368 ds = self._repo.dirstate
1370 1369 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1371 1370 rejected = []
1372 1371 for f in files:
1373 1372 if f not in self._repo.dirstate:
1374 1373 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1375 1374 rejected.append(f)
1376 1375 elif self._repo.dirstate[f] != 'a':
1377 1376 self._repo.dirstate.remove(f)
1378 1377 else:
1379 1378 self._repo.dirstate.drop(f)
1380 1379 return rejected
1381 1380
1382 1381 def undelete(self, list):
1383 1382 pctxs = self.parents()
1384 1383 with self._repo.wlock():
1385 1384 ds = self._repo.dirstate
1386 1385 for f in list:
1387 1386 if self._repo.dirstate[f] != 'r':
1388 1387 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1389 1388 else:
1390 1389 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1391 1390 t = fctx.data()
1392 1391 self._repo.wwrite(f, t, fctx.flags())
1393 1392 self._repo.dirstate.normal(f)
1394 1393
1395 1394 def copy(self, source, dest):
1396 1395 try:
1397 1396 st = self._repo.wvfs.lstat(dest)
1398 1397 except OSError as err:
1399 1398 if err.errno != errno.ENOENT:
1400 1399 raise
1401 1400 self._repo.ui.warn(_("%s does not exist!\n")
1402 1401 % self._repo.dirstate.pathto(dest))
1403 1402 return
1404 1403 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1405 1404 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1406 1405 "symbolic link\n")
1407 1406 % self._repo.dirstate.pathto(dest))
1408 1407 else:
1409 1408 with self._repo.wlock():
1410 1409 if self._repo.dirstate[dest] in '?':
1411 1410 self._repo.dirstate.add(dest)
1412 1411 elif self._repo.dirstate[dest] in 'r':
1413 1412 self._repo.dirstate.normallookup(dest)
1414 1413 self._repo.dirstate.copy(source, dest)
1415 1414
1416 1415 def match(self, pats=None, include=None, exclude=None, default='glob',
1417 1416 listsubrepos=False, badfn=None):
1418 1417 r = self._repo
1419 1418
1420 1419 # Only a case-insensitive filesystem needs magic to translate user input
1421 1420 # to actual case in the filesystem.
1422 1421 icasefs = not util.fscasesensitive(r.root)
1423 1422 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1424 1423 default, auditor=r.auditor, ctx=self,
1425 1424 listsubrepos=listsubrepos, badfn=badfn,
1426 1425 icasefs=icasefs)
1427 1426
1428 1427 def _filtersuspectsymlink(self, files):
1429 1428 if not files or self._repo.dirstate._checklink:
1430 1429 return files
1431 1430
1432 1431 # Symlink placeholders may get non-symlink-like contents
1433 1432 # via user error or dereferencing by NFS or Samba servers,
1434 1433 # so we filter out any placeholders that don't look like a
1435 1434 # symlink
1436 1435 sane = []
1437 1436 for f in files:
1438 1437 if self.flags(f) == 'l':
1439 1438 d = self[f].data()
1440 1439 if (d == '' or len(d) >= 1024 or '\n' in d
1441 1440 or stringutil.binary(d)):
1442 1441 self._repo.ui.debug('ignoring suspect symlink placeholder'
1443 1442 ' "%s"\n' % f)
1444 1443 continue
1445 1444 sane.append(f)
1446 1445 return sane
1447 1446
1448 1447 def _checklookup(self, files):
1449 1448 # check for any possibly clean files
1450 1449 if not files:
1451 1450 return [], [], []
1452 1451
1453 1452 modified = []
1454 1453 deleted = []
1455 1454 fixup = []
1456 1455 pctx = self._parents[0]
1457 1456 # do a full compare of any files that might have changed
1458 1457 for f in sorted(files):
1459 1458 try:
1460 1459 # This will return True for a file that got replaced by a
1461 1460 # directory in the interim, but fixing that is pretty hard.
1462 1461 if (f not in pctx or self.flags(f) != pctx.flags(f)
1463 1462 or pctx[f].cmp(self[f])):
1464 1463 modified.append(f)
1465 1464 else:
1466 1465 fixup.append(f)
1467 1466 except (IOError, OSError):
1468 1467 # A file became inaccessible in between? Mark it as deleted,
1469 1468 # matching dirstate behavior (issue5584).
1470 1469 # The dirstate has more complex behavior around whether a
1471 1470 # missing file matches a directory, etc, but we don't need to
1472 1471 # bother with that: if f has made it to this point, we're sure
1473 1472 # it's in the dirstate.
1474 1473 deleted.append(f)
1475 1474
1476 1475 return modified, deleted, fixup
1477 1476
1478 1477 def _poststatusfixup(self, status, fixup):
1479 1478 """update dirstate for files that are actually clean"""
1480 1479 poststatus = self._repo.postdsstatus()
1481 1480 if fixup or poststatus:
1482 1481 try:
1483 1482 oldid = self._repo.dirstate.identity()
1484 1483
1485 1484 # updating the dirstate is optional
1486 1485 # so we don't wait on the lock
1487 1486 # wlock can invalidate the dirstate, so cache normal _after_
1488 1487 # taking the lock
1489 1488 with self._repo.wlock(False):
1490 1489 if self._repo.dirstate.identity() == oldid:
1491 1490 if fixup:
1492 1491 normal = self._repo.dirstate.normal
1493 1492 for f in fixup:
1494 1493 normal(f)
1495 1494 # write changes out explicitly, because nesting
1496 1495 # wlock at runtime may prevent 'wlock.release()'
1497 1496 # after this block from doing so for subsequent
1498 1497 # changing files
1499 1498 tr = self._repo.currenttransaction()
1500 1499 self._repo.dirstate.write(tr)
1501 1500
1502 1501 if poststatus:
1503 1502 for ps in poststatus:
1504 1503 ps(self, status)
1505 1504 else:
1506 1505 # in this case, writing changes out breaks
1507 1506 # consistency, because .hg/dirstate was
1508 1507 # already changed simultaneously after last
1509 1508 # caching (see also issue5584 for detail)
1510 1509 self._repo.ui.debug('skip updating dirstate: '
1511 1510 'identity mismatch\n')
1512 1511 except error.LockError:
1513 1512 pass
1514 1513 finally:
1515 1514 # Even if the wlock couldn't be grabbed, clear out the list.
1516 1515 self._repo.clearpostdsstatus()
1517 1516
1518 1517 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1519 1518 '''Gets the status from the dirstate -- internal use only.'''
1520 1519 subrepos = []
1521 1520 if '.hgsub' in self:
1522 1521 subrepos = sorted(self.substate)
1523 1522 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1524 1523 clean=clean, unknown=unknown)
1525 1524
1526 1525 # check for any possibly clean files
1527 1526 fixup = []
1528 1527 if cmp:
1529 1528 modified2, deleted2, fixup = self._checklookup(cmp)
1530 1529 s.modified.extend(modified2)
1531 1530 s.deleted.extend(deleted2)
1532 1531
1533 1532 if fixup and clean:
1534 1533 s.clean.extend(fixup)
1535 1534
1536 1535 self._poststatusfixup(s, fixup)
1537 1536
1538 1537 if match.always():
1539 1538 # cache for performance
1540 1539 if s.unknown or s.ignored or s.clean:
1541 1540 # "_status" is cached with list*=False in the normal route
1542 1541 self._status = scmutil.status(s.modified, s.added, s.removed,
1543 1542 s.deleted, [], [], [])
1544 1543 else:
1545 1544 self._status = s
1546 1545
1547 1546 return s
1548 1547
1549 1548 @propertycache
1550 1549 def _manifest(self):
1551 1550 """generate a manifest corresponding to the values in self._status
1552 1551
1553 1552 This reuses the file nodeids from the parent, but uses special node
1554 1553 identifiers for added and modified files. This is used by manifest
1555 1554 merge to see that files are different and by update logic to avoid
1556 1555 deleting newly added files.
1557 1556 """
1558 1557 return self._buildstatusmanifest(self._status)
1559 1558
1560 1559 def _buildstatusmanifest(self, status):
1561 1560 """Builds a manifest that includes the given status results."""
1562 1561 parents = self.parents()
1563 1562
1564 1563 man = parents[0].manifest().copy()
1565 1564
1566 1565 ff = self._flagfunc
1567 1566 for i, l in ((addednodeid, status.added),
1568 1567 (modifiednodeid, status.modified)):
1569 1568 for f in l:
1570 1569 man[f] = i
1571 1570 try:
1572 1571 man.setflag(f, ff(f))
1573 1572 except OSError:
1574 1573 pass
1575 1574
1576 1575 for f in status.deleted + status.removed:
1577 1576 if f in man:
1578 1577 del man[f]
1579 1578
1580 1579 return man
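# Note (illustrative): addednodeid and modifiednodeid used above are fixed
# placeholder node identifiers imported from mercurial.node at the top of
# this file. Because they never match a real file nodeid, manifest
# comparisons treat added and modified working-copy files as different from
# their committed counterparts, which is what the _manifest docstring above
# relies on for merge and update.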
1581 1580
1582 1581 def _buildstatus(self, other, s, match, listignored, listclean,
1583 1582 listunknown):
1584 1583 """build a status with respect to another context
1585 1584
1586 1585 This includes logic for maintaining the fast path of status when
1587 1586 comparing the working directory against its parent: a new manifest
1588 1587 is only built if self (the working directory) is being compared
1589 1588 against something other than its parent (repo['.']).
1590 1589 """
1591 1590 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1592 1591 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1593 1592 # might have accidentally ended up with the entire contents of the file
1594 1593 # they are supposed to be linking to.
1595 1594 s.modified[:] = self._filtersuspectsymlink(s.modified)
1596 1595 if other != self._repo['.']:
1597 1596 s = super(workingctx, self)._buildstatus(other, s, match,
1598 1597 listignored, listclean,
1599 1598 listunknown)
1600 1599 return s
1601 1600
1602 1601 def _matchstatus(self, other, match):
1603 1602 """override the match method with a filter for directory patterns
1604 1603
1605 1604 We use inheritance to customize the match.bad method only in cases of
1606 1605 workingctx since it belongs only to the working directory when
1607 1606 comparing against the parent changeset.
1608 1607
1609 1608 If we aren't comparing against the working directory's parent, then we
1610 1609 just use the default match object sent to us.
1611 1610 """
1612 1611 if other != self._repo['.']:
1613 1612 def bad(f, msg):
1614 1613 # 'f' may be a directory pattern from 'match.files()',
1615 1614 # so 'f not in ctx1' is not enough
1616 1615 if f not in other and not other.hasdir(f):
1617 1616 self._repo.ui.warn('%s: %s\n' %
1618 1617 (self._repo.dirstate.pathto(f), msg))
1619 1618 match.bad = bad
1620 1619 return match
1621 1620
1622 1621 def markcommitted(self, node):
1623 1622 super(workingctx, self).markcommitted(node)
1624 1623
1625 1624 sparse.aftercommit(self._repo, node)
1626 1625
1627 1626 class committablefilectx(basefilectx):
1628 1627 """A committablefilectx provides common functionality for a file context
1629 1628 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1630 1629 def __init__(self, repo, path, filelog=None, ctx=None):
1631 1630 self._repo = repo
1632 1631 self._path = path
1633 1632 self._changeid = None
1634 1633 self._filerev = self._filenode = None
1635 1634
1636 1635 if filelog is not None:
1637 1636 self._filelog = filelog
1638 1637 if ctx:
1639 1638 self._changectx = ctx
1640 1639
1641 1640 def __nonzero__(self):
1642 1641 return True
1643 1642
1644 1643 __bool__ = __nonzero__
1645 1644
1646 1645 def linkrev(self):
1647 1646 # linked to self._changectx no matter if file is modified or not
1648 1647 return self.rev()
1649 1648
1650 1649 def parents(self):
1651 1650 '''return parent filectxs, following copies if necessary'''
1652 1651 def filenode(ctx, path):
1653 1652 return ctx._manifest.get(path, nullid)
1654 1653
1655 1654 path = self._path
1656 1655 fl = self._filelog
1657 1656 pcl = self._changectx._parents
1658 1657 renamed = self.renamed()
1659 1658
1660 1659 if renamed:
1661 1660 pl = [renamed + (None,)]
1662 1661 else:
1663 1662 pl = [(path, filenode(pcl[0], path), fl)]
1664 1663
1665 1664 for pc in pcl[1:]:
1666 1665 pl.append((path, filenode(pc, path), fl))
1667 1666
1668 1667 return [self._parentfilectx(p, fileid=n, filelog=l)
1669 1668 for p, n, l in pl if n != nullid]
1670 1669
1671 1670 def children(self):
1672 1671 return []
1673 1672
1674 1673 class workingfilectx(committablefilectx):
1675 1674 """A workingfilectx object makes access to data related to a particular
1676 1675 file in the working directory convenient."""
1677 1676 def __init__(self, repo, path, filelog=None, workingctx=None):
1678 1677 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1679 1678
1680 1679 @propertycache
1681 1680 def _changectx(self):
1682 1681 return workingctx(self._repo)
1683 1682
1684 1683 def data(self):
1685 1684 return self._repo.wread(self._path)
1686 1685 def renamed(self):
1687 1686 rp = self._repo.dirstate.copied(self._path)
1688 1687 if not rp:
1689 1688 return None
1690 1689 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1691 1690
1692 1691 def size(self):
1693 1692 return self._repo.wvfs.lstat(self._path).st_size
1694 1693 def date(self):
1695 1694 t, tz = self._changectx.date()
1696 1695 try:
1697 1696 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1698 1697 except OSError as err:
1699 1698 if err.errno != errno.ENOENT:
1700 1699 raise
1701 1700 return (t, tz)
1702 1701
1703 1702 def exists(self):
1704 1703 return self._repo.wvfs.exists(self._path)
1705 1704
1706 1705 def lexists(self):
1707 1706 return self._repo.wvfs.lexists(self._path)
1708 1707
1709 1708 def audit(self):
1710 1709 return self._repo.wvfs.audit(self._path)
1711 1710
1712 1711 def cmp(self, fctx):
1713 1712 """compare with other file context
1714 1713
1715 1714 returns True if different than fctx.
1716 1715 """
1717 1716 # fctx should be a filectx (not a workingfilectx)
1718 1717 # invert comparison to reuse the same code path
1719 1718 return fctx.cmp(self)
1720 1719
1721 1720 def remove(self, ignoremissing=False):
1722 1721 """wraps unlink for a repo's working directory"""
1723 1722 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1724 1723 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1725 1724 rmdir=rmdir)
1726 1725
1727 1726 def write(self, data, flags, backgroundclose=False, **kwargs):
1728 1727 """wraps repo.wwrite"""
1729 1728 self._repo.wwrite(self._path, data, flags,
1730 1729 backgroundclose=backgroundclose,
1731 1730 **kwargs)
1732 1731
1733 1732 def markcopied(self, src):
1734 1733 """marks this file a copy of `src`"""
1735 1734 if self._repo.dirstate[self._path] in "nma":
1736 1735 self._repo.dirstate.copy(src, self._path)
1737 1736
1738 1737 def clearunknown(self):
1739 1738 """Removes conflicting items in the working directory so that
1740 1739 ``write()`` can be called successfully.
1741 1740 """
1742 1741 wvfs = self._repo.wvfs
1743 1742 f = self._path
1744 1743 wvfs.audit(f)
1745 1744 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1746 1745 # remove files under the directory as they should already be
1747 1746 # warned and backed up
1748 1747 if wvfs.isdir(f) and not wvfs.islink(f):
1749 1748 wvfs.rmtree(f, forcibly=True)
1750 1749 for p in reversed(list(util.finddirs(f))):
1751 1750 if wvfs.isfileorlink(p):
1752 1751 wvfs.unlink(p)
1753 1752 break
1754 1753 else:
1755 1754 # don't remove files if path conflicts are not processed
1756 1755 if wvfs.isdir(f) and not wvfs.islink(f):
1757 1756 wvfs.removedirs(f)
1758 1757
1759 1758 def setflags(self, l, x):
1760 1759 self._repo.wvfs.setflags(self._path, l, x)
1761 1760
1762 1761 class overlayworkingctx(committablectx):
1763 1762 """Wraps another mutable context with a write-back cache that can be
1764 1763 converted into a commit context.
1765 1764
1766 1765 self._cache[path] maps to a dict with keys: {
1767 1766 'exists': bool?
1768 1767 'date': date?
1769 1768 'data': str?
1770 1769 'flags': str?
1771 1770 'copied': str? (path or None)
1772 1771 }
1773 1772 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1774 1773 is `False`, the file was deleted.
1775 1774 """
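# Editorial sketch (not part of the original source): a dirty cache entry for
# a modified, executable file might look like
#
#     self._cache['foo/bar.py'] = {
#         'exists': True,                      # the file is present
#         'data': b'print("hi")\n',            # new contents
#         'date': dateutil.makedate(),         # modification time
#         'flags': 'x',                        # executable bit set
#         'copied': None,                      # not a copy/rename
#     }
#
# whereas a deleted file is simply recorded with 'exists': False.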
1776 1775
1777 1776 def __init__(self, repo):
1778 1777 super(overlayworkingctx, self).__init__(repo)
1779 1778 self.clean()
1780 1779
1781 1780 def setbase(self, wrappedctx):
1782 1781 self._wrappedctx = wrappedctx
1783 1782 self._parents = [wrappedctx]
1784 1783 # Drop old manifest cache as it is now out of date.
1785 1784 # This is necessary when, e.g., rebasing several nodes with one
1786 1785 # ``overlayworkingctx`` (e.g. with --collapse).
1787 1786 util.clearcachedproperty(self, '_manifest')
1788 1787
1789 1788 def data(self, path):
1790 1789 if self.isdirty(path):
1791 1790 if self._cache[path]['exists']:
1792 1791 if self._cache[path]['data']:
1793 1792 return self._cache[path]['data']
1794 1793 else:
1795 1794 # Must fall back here, too, because we only set flags.
1796 1795 return self._wrappedctx[path].data()
1797 1796 else:
1798 1797 raise error.ProgrammingError("No such file or directory: %s" %
1799 1798 path)
1800 1799 else:
1801 1800 return self._wrappedctx[path].data()
1802 1801
1803 1802 @propertycache
1804 1803 def _manifest(self):
1805 1804 parents = self.parents()
1806 1805 man = parents[0].manifest().copy()
1807 1806
1808 1807 flag = self._flagfunc
1809 1808 for path in self.added():
1810 1809 man[path] = addednodeid
1811 1810 man.setflag(path, flag(path))
1812 1811 for path in self.modified():
1813 1812 man[path] = modifiednodeid
1814 1813 man.setflag(path, flag(path))
1815 1814 for path in self.removed():
1816 1815 del man[path]
1817 1816 return man
1818 1817
1819 1818 @propertycache
1820 1819 def _flagfunc(self):
1821 1820 def f(path):
1822 1821 return self._cache[path]['flags']
1823 1822 return f
1824 1823
1825 1824 def files(self):
1826 1825 return sorted(self.added() + self.modified() + self.removed())
1827 1826
1828 1827 def modified(self):
1829 1828 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1830 1829 self._existsinparent(f)]
1831 1830
1832 1831 def added(self):
1833 1832 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1834 1833 not self._existsinparent(f)]
1835 1834
1836 1835 def removed(self):
1837 1836 return [f for f in self._cache.keys() if
1838 1837 not self._cache[f]['exists'] and self._existsinparent(f)]
1839 1838
1840 1839 def isinmemory(self):
1841 1840 return True
1842 1841
1843 1842 def filedate(self, path):
1844 1843 if self.isdirty(path):
1845 1844 return self._cache[path]['date']
1846 1845 else:
1847 1846 return self._wrappedctx[path].date()
1848 1847
1849 1848 def markcopied(self, path, origin):
1850 1849 if self.isdirty(path):
1851 1850 self._cache[path]['copied'] = origin
1852 1851 else:
1853 1852 raise error.ProgrammingError('markcopied() called on clean context')
1854 1853
1855 1854 def copydata(self, path):
1856 1855 if self.isdirty(path):
1857 1856 return self._cache[path]['copied']
1858 1857 else:
1859 1858 raise error.ProgrammingError('copydata() called on clean context')
1860 1859
1861 1860 def flags(self, path):
1862 1861 if self.isdirty(path):
1863 1862 if self._cache[path]['exists']:
1864 1863 return self._cache[path]['flags']
1865 1864 else:
1866 1865 raise error.ProgrammingError("No such file or directory: %s" %
1867 1866 self._path)
1868 1867 else:
1869 1868 return self._wrappedctx[path].flags()
1870 1869
1871 1870 def _existsinparent(self, path):
1872 1871 try:
1873 1872 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
1874 1873 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1875 1874 # with an ``exists()`` function.
1876 1875 self._wrappedctx[path]
1877 1876 return True
1878 1877 except error.ManifestLookupError:
1879 1878 return False
1880 1879
1881 1880 def _auditconflicts(self, path):
1882 1881 """Replicates conflict checks done by wvfs.write().
1883 1882
1884 1883 Since we never write to the filesystem and never call `applyupdates` in
1885 1884 IMM, we'll never check that a path is actually writable -- e.g., because
1886 1885 it adds `a/foo`, but `a` is actually a file in the other commit.
1887 1886 """
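# Illustrative note (editorial addition): given path = 'a/foo', the first
# loop below checks whether any prefix component ('a') is a file in p1 and
# is still marked as existing in the cache; the second check goes the other
# way and aborts if p1 already tracks files under 'a/foo/' that have not
# been deleted in this in-memory context.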
1888 1887 def fail(path, component):
1889 1888 # p1() is the base and we're receiving "writes" for p2()'s
1890 1889 # files.
1891 1890 if 'l' in self.p1()[component].flags():
1892 1891 raise error.Abort("error: %s conflicts with symlink %s "
1893 1892 "in %s." % (path, component,
1894 1893 self.p1().rev()))
1895 1894 else:
1896 1895 raise error.Abort("error: '%s' conflicts with file '%s' in "
1897 1896 "%s." % (path, component,
1898 1897 self.p1().rev()))
1899 1898
1900 1899 # Test that each new directory to be created to write this path from p2
1901 1900 # is not a file in p1.
1902 1901 components = path.split('/')
1903 1902 for i in pycompat.xrange(len(components)):
1904 1903 component = "/".join(components[0:i])
1905 1904 if component in self.p1() and self._cache[component]['exists']:
1906 1905 fail(path, component)
1907 1906
1908 1907 # Test the other direction -- that this path from p2 isn't a directory
1909 1908 # in p1 (test that p1 doesn't have any paths matching `path/*`).
1910 1909 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1911 1910 matches = self.p1().manifest().matches(match)
1912 1911 mfiles = matches.keys()
1913 1912 if len(mfiles) > 0:
1914 1913 if len(mfiles) == 1 and mfiles[0] == path:
1915 1914 return
1916 1915 # omit the files which are deleted in current IMM wctx
1917 1916 mfiles = [m for m in mfiles if self._cache[m]['exists']]
1918 1917 if not mfiles:
1919 1918 return
1920 1919 raise error.Abort("error: file '%s' cannot be written because "
1921 1920 " '%s/' is a folder in %s (containing %d "
1922 1921 "entries: %s)"
1923 1922 % (path, path, self.p1(), len(mfiles),
1924 1923 ', '.join(mfiles)))
1925 1924
1926 1925 def write(self, path, data, flags='', **kwargs):
1927 1926 if data is None:
1928 1927 raise error.ProgrammingError("data must be non-None")
1929 1928 self._auditconflicts(path)
1930 1929 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1931 1930 flags=flags)
1932 1931
1933 1932 def setflags(self, path, l, x):
1934 1933 flag = ''
1935 1934 if l:
1936 1935 flag = 'l'
1937 1936 elif x:
1938 1937 flag = 'x'
1939 1938 self._markdirty(path, exists=True, date=dateutil.makedate(),
1940 1939 flags=flag)
1941 1940
1942 1941 def remove(self, path):
1943 1942 self._markdirty(path, exists=False)
1944 1943
1945 1944 def exists(self, path):
1946 1945 """exists behaves like `lexists`, but needs to follow symlinks and
1947 1946 return False if they are broken.
1948 1947 """
1949 1948 if self.isdirty(path):
1950 1949 # If this path exists and is a symlink, "follow" it by calling
1951 1950 # exists on the destination path.
1952 1951 if (self._cache[path]['exists'] and
1953 1952 'l' in self._cache[path]['flags']):
1954 1953 return self.exists(self._cache[path]['data'].strip())
1955 1954 else:
1956 1955 return self._cache[path]['exists']
1957 1956
1958 1957 return self._existsinparent(path)
1959 1958
1960 1959 def lexists(self, path):
1961 1960 """lexists returns True if the path exists"""
1962 1961 if self.isdirty(path):
1963 1962 return self._cache[path]['exists']
1964 1963
1965 1964 return self._existsinparent(path)
1966 1965
1967 1966 def size(self, path):
1968 1967 if self.isdirty(path):
1969 1968 if self._cache[path]['exists']:
1970 1969 return len(self._cache[path]['data'])
1971 1970 else:
1972 1971 raise error.ProgrammingError("No such file or directory: %s" %
1973 1972 self._path)
1974 1973 return self._wrappedctx[path].size()
1975 1974
1976 1975 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1977 1976 user=None, editor=None):
1978 1977 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1979 1978 committed.
1980 1979
1981 1980 ``text`` is the commit message.
1982 1981 ``parents`` (optional) are rev numbers.
1983 1982 """
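# Usage sketch (editorial addition, not part of the original source): an
# in-memory rebase might finish a node roughly like this, assuming ``wctx``
# is this overlayworkingctx and ``ctx`` is the changeset being rebased:
#
#     mctx = wctx.tomemctx(ctx.description(), user=ctx.user(),
#                          date=ctx.date(), extra=ctx.extra(),
#                          branch=ctx.branch())
#     newnode = repo.commitctx(mctx)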
1984 1983 # Default parents to the wrapped contexts' if not passed.
1985 1984 if parents is None:
1986 1985 parents = self._wrappedctx.parents()
1987 1986 if len(parents) == 1:
1988 1987 parents = (parents[0], None)
1989 1988
1990 1989 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1991 1990 if parents[1] is None:
1992 1991 parents = (self._repo[parents[0]], None)
1993 1992 else:
1994 1993 parents = (self._repo[parents[0]], self._repo[parents[1]])
1995 1994
1996 1995 files = self._cache.keys()
1997 1996 def getfile(repo, memctx, path):
1998 1997 if self._cache[path]['exists']:
1999 1998 return memfilectx(repo, memctx, path,
2000 1999 self._cache[path]['data'],
2001 2000 'l' in self._cache[path]['flags'],
2002 2001 'x' in self._cache[path]['flags'],
2003 2002 self._cache[path]['copied'])
2004 2003 else:
2005 2004 # Returning None, but including the path in `files`, is
2006 2005 # necessary for memctx to register a deletion.
2007 2006 return None
2008 2007 return memctx(self._repo, parents, text, files, getfile, date=date,
2009 2008 extra=extra, user=user, branch=branch, editor=editor)
2010 2009
2011 2010 def isdirty(self, path):
2012 2011 return path in self._cache
2013 2012
2014 2013 def isempty(self):
2015 2014 # We need to discard any keys that are actually clean before the empty
2016 2015 # commit check.
2017 2016 self._compact()
2018 2017 return len(self._cache) == 0
2019 2018
2020 2019 def clean(self):
2021 2020 self._cache = {}
2022 2021
2023 2022 def _compact(self):
2024 2023 """Removes keys from the cache that are actually clean, by comparing
2025 2024 them with the underlying context.
2026 2025
2027 2026 This can occur during the merge process, e.g. by passing --tool :local
2028 2027 to resolve a conflict.
2029 2028 """
2030 2029 keys = []
2031 2030 for path in self._cache.keys():
2032 2031 cache = self._cache[path]
2033 2032 try:
2034 2033 underlying = self._wrappedctx[path]
2035 2034 if (underlying.data() == cache['data'] and
2036 2035 underlying.flags() == cache['flags']):
2037 2036 keys.append(path)
2038 2037 except error.ManifestLookupError:
2039 2038 # Path not in the underlying manifest (created).
2040 2039 continue
2041 2040
2042 2041 for path in keys:
2043 2042 del self._cache[path]
2044 2043 return keys
2045 2044
2046 2045 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2047 2046 # data not provided, let's see if we already have some; if not, let's
2048 2047 # grab it from our underlying context, so that we always have data if
2049 2048 # the file is marked as existing.
2050 2049 if exists and data is None:
2051 2050 oldentry = self._cache.get(path) or {}
2052 2051 data = oldentry.get('data') or self._wrappedctx[path].data()
2053 2052
2054 2053 self._cache[path] = {
2055 2054 'exists': exists,
2056 2055 'data': data,
2057 2056 'date': date,
2058 2057 'flags': flags,
2059 2058 'copied': None,
2060 2059 }
2061 2060
2062 2061 def filectx(self, path, filelog=None):
2063 2062 return overlayworkingfilectx(self._repo, path, parent=self,
2064 2063 filelog=filelog)
2065 2064
2066 2065 class overlayworkingfilectx(committablefilectx):
2067 2066 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2068 2067 cache, which can be flushed through later by calling ``flush()``."""
2069 2068
2070 2069 def __init__(self, repo, path, filelog=None, parent=None):
2071 2070 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2072 2071 parent)
2073 2072 self._repo = repo
2074 2073 self._parent = parent
2075 2074 self._path = path
2076 2075
2077 2076 def cmp(self, fctx):
2078 2077 return self.data() != fctx.data()
2079 2078
2080 2079 def changectx(self):
2081 2080 return self._parent
2082 2081
2083 2082 def data(self):
2084 2083 return self._parent.data(self._path)
2085 2084
2086 2085 def date(self):
2087 2086 return self._parent.filedate(self._path)
2088 2087
2089 2088 def exists(self):
2090 2089 return self.lexists()
2091 2090
2092 2091 def lexists(self):
2093 2092 return self._parent.exists(self._path)
2094 2093
2095 2094 def renamed(self):
2096 2095 path = self._parent.copydata(self._path)
2097 2096 if not path:
2098 2097 return None
2099 2098 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2100 2099
2101 2100 def size(self):
2102 2101 return self._parent.size(self._path)
2103 2102
2104 2103 def markcopied(self, origin):
2105 2104 self._parent.markcopied(self._path, origin)
2106 2105
2107 2106 def audit(self):
2108 2107 pass
2109 2108
2110 2109 def flags(self):
2111 2110 return self._parent.flags(self._path)
2112 2111
2113 2112 def setflags(self, islink, isexec):
2114 2113 return self._parent.setflags(self._path, islink, isexec)
2115 2114
2116 2115 def write(self, data, flags, backgroundclose=False, **kwargs):
2117 2116 return self._parent.write(self._path, data, flags, **kwargs)
2118 2117
2119 2118 def remove(self, ignoremissing=False):
2120 2119 return self._parent.remove(self._path)
2121 2120
2122 2121 def clearunknown(self):
2123 2122 pass
2124 2123
2125 2124 class workingcommitctx(workingctx):
2126 2125 """A workingcommitctx object makes access to data related to
2127 2126 the revision being committed convenient.
2128 2127
2129 2128 This hides changes in the working directory, if they aren't
2130 2129 committed in this context.
2131 2130 """
2132 2131 def __init__(self, repo, changes,
2133 2132 text="", user=None, date=None, extra=None):
2134 2133 super(workingcommitctx, self).__init__(repo, text, user, date, extra,
2135 2134 changes)
2136 2135
2137 2136 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2138 2137 """Return matched files only in ``self._status``
2139 2138
2140 2139 Uncommitted files appear "clean" via this context, even if
2141 2140 they aren't actually so in the working directory.
2142 2141 """
2143 2142 if clean:
2144 2143 clean = [f for f in self._manifest if f not in self._changedset]
2145 2144 else:
2146 2145 clean = []
2147 2146 return scmutil.status([f for f in self._status.modified if match(f)],
2148 2147 [f for f in self._status.added if match(f)],
2149 2148 [f for f in self._status.removed if match(f)],
2150 2149 [], [], [], clean)
2151 2150
2152 2151 @propertycache
2153 2152 def _changedset(self):
2154 2153 """Return the set of files changed in this context
2155 2154 """
2156 2155 changed = set(self._status.modified)
2157 2156 changed.update(self._status.added)
2158 2157 changed.update(self._status.removed)
2159 2158 return changed
2160 2159
2161 2160 def makecachingfilectxfn(func):
2162 2161 """Create a filectxfn that caches based on the path.
2163 2162
2164 2163 We can't use util.cachefunc because it uses all arguments as the cache
2165 2164 key and this creates a cycle since the arguments include the repo and
2166 2165 memctx.
2167 2166 """
2168 2167 cache = {}
2169 2168
2170 2169 def getfilectx(repo, memctx, path):
2171 2170 if path not in cache:
2172 2171 cache[path] = func(repo, memctx, path)
2173 2172 return cache[path]
2174 2173
2175 2174 return getfilectx
2176 2175
2177 2176 def memfilefromctx(ctx):
2178 2177 """Given a context return a memfilectx for ctx[path]
2179 2178
2180 2179 This is a convenience method for building a memctx based on another
2181 2180 context.
2182 2181 """
2183 2182 def getfilectx(repo, memctx, path):
2184 2183 fctx = ctx[path]
2185 2184 # this is weird but apparently we only keep track of one parent
2186 2185 # (why not only store that instead of a tuple?)
2187 2186 copied = fctx.renamed()
2188 2187 if copied:
2189 2188 copied = copied[0]
2190 2189 return memfilectx(repo, memctx, path, fctx.data(),
2191 2190 islink=fctx.islink(), isexec=fctx.isexec(),
2192 2191 copied=copied)
2193 2192
2194 2193 return getfilectx
2195 2194
2196 2195 def memfilefrompatch(patchstore):
2197 2196 """Given a patch (e.g. patchstore object) return a memfilectx
2198 2197
2199 2198 This is a convenience method for building a memctx based on a patchstore.
2200 2199 """
2201 2200 def getfilectx(repo, memctx, path):
2202 2201 data, mode, copied = patchstore.getfile(path)
2203 2202 if data is None:
2204 2203 return None
2205 2204 islink, isexec = mode
2206 2205 return memfilectx(repo, memctx, path, data, islink=islink,
2207 2206 isexec=isexec, copied=copied)
2208 2207
2209 2208 return getfilectx
2210 2209
2211 2210 class memctx(committablectx):
2212 2211 """Use memctx to perform in-memory commits via localrepo.commitctx().
2213 2212
2214 2213 Revision information is supplied at initialization time, while
2215 2214 related file data is made available through a callback
2216 2215 mechanism. 'repo' is the current localrepo, 'parents' is a
2217 2216 sequence of two parent revision identifiers (pass None for every
2218 2217 missing parent), 'text' is the commit message and 'files' lists
2219 2218 names of files touched by the revision (normalized and relative to
2220 2219 repository root).
2221 2220
2222 2221 filectxfn(repo, memctx, path) is a callable receiving the
2223 2222 repository, the current memctx object and the normalized path of
2224 2223 requested file, relative to repository root. It is fired by the
2225 2224 commit function for every file in 'files', but calls order is
2226 2225 undefined. If the file is available in the revision being
2227 2226 committed (updated or added), filectxfn returns a memfilectx
2228 2227 object. If the file was removed, filectxfn returns None for recent
2229 2228 Mercurial. Moved files are represented by marking the source file
2230 2229 removed and the new file added with copy information (see
2231 2230 memfilectx).
2232 2231
2233 2232 user receives the committer name and defaults to current
2234 2233 repository username, date is the commit date in any format
2235 2234 supported by dateutil.parsedate() and defaults to current date, extra
2236 2235 is a dictionary of metadata or is left empty.
2237 2236 """
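# Usage sketch (editorial addition; names such as ``newdata`` are assumed,
# not part of the original source):
#
#     def getfilectx(repo, memctx, path):
#         if path == 'removed.txt':
#             return None                     # report the file as removed
#         return memfilectx(repo, memctx, path, newdata[path])
#
#     mctx = memctx(repo, (repo['.'].node(), None), 'commit message',
#                   ['modified.txt', 'removed.txt'], getfilectx,
#                   user='someone@example.com')
#     newnode = mctx.commit()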
2238 2237
2239 2238 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2240 2239 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2241 2240 # this field to determine what to do in filectxfn.
2242 2241 _returnnoneformissingfiles = True
2243 2242
2244 2243 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2245 2244 date=None, extra=None, branch=None, editor=False):
2246 2245 super(memctx, self).__init__(repo, text, user, date, extra)
2247 2246 self._rev = None
2248 2247 self._node = None
2249 2248 parents = [(p or nullid) for p in parents]
2250 2249 p1, p2 = parents
2251 2250 self._parents = [self._repo[p] for p in (p1, p2)]
2252 2251 files = sorted(set(files))
2253 2252 self._files = files
2254 2253 if branch is not None:
2255 2254 self._extra['branch'] = encoding.fromlocal(branch)
2256 2255 self.substate = {}
2257 2256
2258 2257 if isinstance(filectxfn, patch.filestore):
2259 2258 filectxfn = memfilefrompatch(filectxfn)
2260 2259 elif not callable(filectxfn):
2261 2260 # if store is not callable, wrap it in a function
2262 2261 filectxfn = memfilefromctx(filectxfn)
2263 2262
2264 2263 # memoizing increases performance for e.g. vcs convert scenarios.
2265 2264 self._filectxfn = makecachingfilectxfn(filectxfn)
2266 2265
2267 2266 if editor:
2268 2267 self._text = editor(self._repo, self, [])
2269 2268 self._repo.savecommitmessage(self._text)
2270 2269
2271 2270 def filectx(self, path, filelog=None):
2272 2271 """get a file context from the working directory
2273 2272
2274 2273 Returns None if file doesn't exist and should be removed."""
2275 2274 return self._filectxfn(self._repo, self, path)
2276 2275
2277 2276 def commit(self):
2278 2277 """commit context to the repo"""
2279 2278 return self._repo.commitctx(self)
2280 2279
2281 2280 @propertycache
2282 2281 def _manifest(self):
2283 2282 """generate a manifest based on the return values of filectxfn"""
2284 2283
2285 2284 # keep this simple for now; just worry about p1
2286 2285 pctx = self._parents[0]
2287 2286 man = pctx.manifest().copy()
2288 2287
2289 2288 for f in self._status.modified:
2290 p1node = nullid
2291 p2node = nullid
2292 p = pctx[f].parents() # if file isn't in pctx, check p2?
2293 if len(p) > 0:
2294 p1node = p[0].filenode()
2295 if len(p) > 1:
2296 p2node = p[1].filenode()
2297 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2289 man[f] = modifiednodeid
2298 2290
2299 2291 for f in self._status.added:
2300 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2292 man[f] = addednodeid
2301 2293
2302 2294 for f in self._status.removed:
2303 2295 if f in man:
2304 2296 del man[f]
2305 2297
2306 2298 return man
2307 2299
2308 2300 @propertycache
2309 2301 def _status(self):
2310 2302 """Calculate exact status from ``files`` specified at construction
2311 2303 """
2312 2304 man1 = self.p1().manifest()
2313 2305 p2 = self._parents[1]
2314 2306 # "1 < len(self._parents)" can't be used for checking
2315 2307 # existence of the 2nd parent, because "memctx._parents" is
2316 2308 # explicitly initialized with a list whose length is 2.
2317 2309 if p2.node() != nullid:
2318 2310 man2 = p2.manifest()
2319 2311 managing = lambda f: f in man1 or f in man2
2320 2312 else:
2321 2313 managing = lambda f: f in man1
2322 2314
2323 2315 modified, added, removed = [], [], []
2324 2316 for f in self._files:
2325 2317 if not managing(f):
2326 2318 added.append(f)
2327 2319 elif self[f]:
2328 2320 modified.append(f)
2329 2321 else:
2330 2322 removed.append(f)
2331 2323
2332 2324 return scmutil.status(modified, added, removed, [], [], [], [])
2333 2325
2334 2326 class memfilectx(committablefilectx):
2335 2327 """memfilectx represents an in-memory file to commit.
2336 2328
2337 2329 See memctx and committablefilectx for more details.
2338 2330 """
2339 2331 def __init__(self, repo, changectx, path, data, islink=False,
2340 2332 isexec=False, copied=None):
2341 2333 """
2342 2334 path is the normalized file path relative to repository root.
2343 2335 data is the file content as a string.
2344 2336 islink is True if the file is a symbolic link.
2345 2337 isexec is True if the file is executable.
2346 2338 copied is the source file path if current file was copied in the
2347 2339 revision being committed, or None."""
2348 2340 super(memfilectx, self).__init__(repo, path, None, changectx)
2349 2341 self._data = data
2350 2342 if islink:
2351 2343 self._flags = 'l'
2352 2344 elif isexec:
2353 2345 self._flags = 'x'
2354 2346 else:
2355 2347 self._flags = ''
2356 2348 self._copied = None
2357 2349 if copied:
2358 2350 self._copied = (copied, nullid)
2359 2351
2360 2352 def data(self):
2361 2353 return self._data
2362 2354
2363 2355 def remove(self, ignoremissing=False):
2364 2356 """wraps unlink for a repo's working directory"""
2365 2357 # need to figure out what to do here
2366 2358 del self._changectx[self._path]
2367 2359
2368 2360 def write(self, data, flags, **kwargs):
2369 2361 """wraps repo.wwrite"""
2370 2362 self._data = data
2371 2363
2372 2364
2373 2365 class metadataonlyctx(committablectx):
2374 2366 """Like memctx but it's reusing the manifest of different commit.
2375 2367 Intended to be used by lightweight operations that are creating
2376 2368 metadata-only changes.
2377 2369
2378 2370 Revision information is supplied at initialization time. 'repo' is the
2379 2371 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2380 2372 'parents' is a sequence of two parent revision identifiers (pass None for
2381 2373 every missing parent), 'text' is the commit message.
2382 2374
2383 2375 user receives the committer name and defaults to current repository
2384 2376 username, date is the commit date in any format supported by
2385 2377 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2386 2378 metadata or is left empty.
2387 2379 """
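# Usage sketch (editorial addition): rewording only the commit message of
# ``ctx`` while reusing its manifest untouched might look like
#
#     mctx = metadataonlyctx(repo, ctx, text='reworded message',
#                            user=ctx.user(), date=ctx.date(),
#                            extra=ctx.extra())
#     newnode = mctx.commit()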
2388 2380 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2389 2381 date=None, extra=None, editor=False):
2390 2382 if text is None:
2391 2383 text = originalctx.description()
2392 2384 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2393 2385 self._rev = None
2394 2386 self._node = None
2395 2387 self._originalctx = originalctx
2396 2388 self._manifestnode = originalctx.manifestnode()
2397 2389 if parents is None:
2398 2390 parents = originalctx.parents()
2399 2391 else:
2400 2392 parents = [repo[p] for p in parents if p is not None]
2401 2393 parents = parents[:]
2402 2394 while len(parents) < 2:
2403 2395 parents.append(repo[nullid])
2404 2396 p1, p2 = self._parents = parents
2405 2397
2406 2398 # sanity check to ensure that the reused manifest parents are
2407 2399 # manifests of our commit parents
2408 2400 mp1, mp2 = self.manifestctx().parents
2409 2401 if p1 != nullid and p1.manifestnode() != mp1:
2410 2402 raise RuntimeError('can\'t reuse the manifest: '
2411 2403 'its p1 doesn\'t match the new ctx p1')
2412 2404 if p2 != nullid and p2.manifestnode() != mp2:
2413 2405 raise RuntimeError('can\'t reuse the manifest: '
2414 2406 'its p2 doesn\'t match the new ctx p2')
2415 2407
2416 2408 self._files = originalctx.files()
2417 2409 self.substate = {}
2418 2410
2419 2411 if editor:
2420 2412 self._text = editor(self._repo, self, [])
2421 2413 self._repo.savecommitmessage(self._text)
2422 2414
2423 2415 def manifestnode(self):
2424 2416 return self._manifestnode
2425 2417
2426 2418 @property
2427 2419 def _manifestctx(self):
2428 2420 return self._repo.manifestlog[self._manifestnode]
2429 2421
2430 2422 def filectx(self, path, filelog=None):
2431 2423 return self._originalctx.filectx(path, filelog=filelog)
2432 2424
2433 2425 def commit(self):
2434 2426 """commit context to the repo"""
2435 2427 return self._repo.commitctx(self)
2436 2428
2437 2429 @property
2438 2430 def _manifest(self):
2439 2431 return self._originalctx.manifest()
2440 2432
2441 2433 @propertycache
2442 2434 def _status(self):
2443 2435 """Calculate exact status from ``files`` specified in the ``origctx``
2444 2436 and the parents' manifests.
2445 2437 """
2446 2438 man1 = self.p1().manifest()
2447 2439 p2 = self._parents[1]
2448 2440 # "1 < len(self._parents)" can't be used for checking
2449 2441 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2450 2442 # explicitly initialized with a list whose length is 2.
2451 2443 if p2.node() != nullid:
2452 2444 man2 = p2.manifest()
2453 2445 managing = lambda f: f in man1 or f in man2
2454 2446 else:
2455 2447 managing = lambda f: f in man1
2456 2448
2457 2449 modified, added, removed = [], [], []
2458 2450 for f in self._files:
2459 2451 if not managing(f):
2460 2452 added.append(f)
2461 2453 elif f in self:
2462 2454 modified.append(f)
2463 2455 else:
2464 2456 removed.append(f)
2465 2457
2466 2458 return scmutil.status(modified, added, removed, [], [], [], [])
2467 2459
2468 2460 class arbitraryfilectx(object):
2469 2461 """Allows you to use filectx-like functions on a file in an arbitrary
2470 2462 location on disk, possibly not in the working directory.
2471 2463 """
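# Usage sketch (editorial addition; the path below is hypothetical):
#
#     fctx = arbitraryfilectx('/tmp/merge-backup/foo.txt', repo=repo)
#     if fctx.cmp(repo[None]['foo.txt']):
#         ...  # contents differ from the working-directory copy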
2472 2464 def __init__(self, path, repo=None):
2473 2465 # Repo is optional because contrib/simplemerge uses this class.
2474 2466 self._repo = repo
2475 2467 self._path = path
2476 2468
2477 2469 def cmp(self, fctx):
2478 2470 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2479 2471 # path if either side is a symlink.
2480 2472 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2481 2473 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2482 2474 # Add a fast-path for merge if both sides are disk-backed.
2483 2475 # Note that filecmp uses the opposite return values (True if same)
2484 2476 # from our cmp functions (True if different).
2485 2477 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2486 2478 return self.data() != fctx.data()
2487 2479
2488 2480 def path(self):
2489 2481 return self._path
2490 2482
2491 2483 def flags(self):
2492 2484 return ''
2493 2485
2494 2486 def data(self):
2495 2487 return util.readfile(self._path)
2496 2488
2497 2489 def decodeddata(self):
2498 2490 with open(self._path, "rb") as f:
2499 2491 return f.read()
2500 2492
2501 2493 def remove(self):
2502 2494 util.unlink(self._path)
2503 2495
2504 2496 def write(self, data, flags, **kwargs):
2505 2497 assert not flags
2506 2498 with open(self._path, "w") as f:
2507 2499 f.write(data)