##// END OF EJS Templates
context: add arbitraryfilectx, which can represent files outside the workdir...
Phil Cohen -
r34053:d2fc8842 default
parent child Browse files
Show More
@@ -1,100 +1,81 b''
1 1 #!/usr/bin/env python
2 2 from __future__ import absolute_import
3 3
4 4 import getopt
5 5 import sys
6 6
7 7 import hgdemandimport
8 8 hgdemandimport.enable()
9 9
10 10 from mercurial.i18n import _
11 11 from mercurial import (
12 context,
12 13 error,
13 14 fancyopts,
14 15 simplemerge,
15 16 ui as uimod,
16 17 util,
17 18 )
18 19
19 20 options = [('L', 'label', [], _('labels to use on conflict markers')),
20 21 ('a', 'text', None, _('treat all files as text')),
21 22 ('p', 'print', None,
22 23 _('print results instead of overwriting LOCAL')),
23 24 ('', 'no-minimal', None, _('no effect (DEPRECATED)')),
24 25 ('h', 'help', None, _('display help and exit')),
25 26 ('q', 'quiet', None, _('suppress output'))]
26 27
27 28 usage = _('''simplemerge [OPTS] LOCAL BASE OTHER
28 29
29 30 Simple three-way file merge utility with a minimal feature set.
30 31
31 32 Apply to LOCAL the changes necessary to go from BASE to OTHER.
32 33
33 34 By default, LOCAL is overwritten with the results of this operation.
34 35 ''')
35 36
36 37 class ParseError(Exception):
37 38 """Exception raised on errors in parsing the command line."""
38 39
39 40 def showhelp():
40 41 sys.stdout.write(usage)
41 42 sys.stdout.write('\noptions:\n')
42 43
43 44 out_opts = []
44 45 for shortopt, longopt, default, desc in options:
45 46 out_opts.append(('%2s%s' % (shortopt and '-%s' % shortopt,
46 47 longopt and ' --%s' % longopt),
47 48 '%s' % desc))
48 49 opts_len = max([len(opt[0]) for opt in out_opts])
49 50 for first, second in out_opts:
50 51 sys.stdout.write(' %-*s %s\n' % (opts_len, first, second))
51 52
52 class filebackedctx(object):
53 """simplemerge requires context-like objects"""
54 def __init__(self, path):
55 self._path = path
56
57 def decodeddata(self):
58 with open(self._path, "rb") as f:
59 return f.read()
60
61 def flags(self):
62 return ''
63
64 def path(self):
65 return self._path
66
67 def write(self, data, flags):
68 assert not flags
69 with open(self._path, "w") as f:
70 f.write(data)
71
72 53 try:
73 54 for fp in (sys.stdin, sys.stdout, sys.stderr):
74 55 util.setbinary(fp)
75 56
76 57 opts = {}
77 58 try:
78 59 args = fancyopts.fancyopts(sys.argv[1:], options, opts)
79 60 except getopt.GetoptError as e:
80 61 raise ParseError(e)
81 62 if opts['help']:
82 63 showhelp()
83 64 sys.exit(0)
84 65 if len(args) != 3:
85 66 raise ParseError(_('wrong number of arguments'))
86 67 local, base, other = args
87 68 sys.exit(simplemerge.simplemerge(uimod.ui.load(),
88 filebackedctx(local),
89 filebackedctx(base),
90 filebackedctx(other),
69 context.arbitraryfilectx(local),
70 context.arbitraryfilectx(base),
71 context.arbitraryfilectx(other),
91 72 **opts))
92 73 except ParseError as e:
93 74 sys.stdout.write("%s: %s\n" % (sys.argv[0], e))
94 75 showhelp()
95 76 sys.exit(1)
96 77 except error.Abort as e:
97 78 sys.stderr.write("abort: %s\n" % e)
98 79 sys.exit(255)
99 80 except KeyboardInterrupt:
100 81 sys.exit(255)
@@ -1,2387 +1,2418 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirid,
25 25 wdirnodes,
26 26 wdirrev,
27 27 )
28 28 from . import (
29 29 encoding,
30 30 error,
31 31 fileset,
32 32 match as matchmod,
33 33 mdiff,
34 34 obsolete as obsmod,
35 35 patch,
36 36 pathutil,
37 37 phases,
38 38 pycompat,
39 39 repoview,
40 40 revlog,
41 41 scmutil,
42 42 sparse,
43 43 subrepo,
44 44 util,
45 45 )
46 46
47 47 propertycache = util.propertycache
48 48
49 49 nonascii = re.compile(r'[^\x21-\x7f]').search
50 50
51 51 class basectx(object):
52 52 """A basectx object represents the common logic for its children:
53 53 changectx: read-only context that is already present in the repo,
54 54 workingctx: a context that represents the working directory and can
55 55 be committed,
56 56 memctx: a context that represents changes in-memory and can also
57 57 be committed."""
58 58 def __new__(cls, repo, changeid='', *args, **kwargs):
59 59 if isinstance(changeid, basectx):
60 60 return changeid
61 61
62 62 o = super(basectx, cls).__new__(cls)
63 63
64 64 o._repo = repo
65 65 o._rev = nullrev
66 66 o._node = nullid
67 67
68 68 return o
69 69
70 70 def __bytes__(self):
71 71 return short(self.node())
72 72
73 73 __str__ = encoding.strmethod(__bytes__)
74 74
75 75 def __int__(self):
76 76 return self.rev()
77 77
78 78 def __repr__(self):
79 79 return r"<%s %s>" % (type(self).__name__, str(self))
80 80
81 81 def __eq__(self, other):
82 82 try:
83 83 return type(self) == type(other) and self._rev == other._rev
84 84 except AttributeError:
85 85 return False
86 86
87 87 def __ne__(self, other):
88 88 return not (self == other)
89 89
90 90 def __contains__(self, key):
91 91 return key in self._manifest
92 92
93 93 def __getitem__(self, key):
94 94 return self.filectx(key)
95 95
96 96 def __iter__(self):
97 97 return iter(self._manifest)
98 98
99 99 def _buildstatusmanifest(self, status):
100 100 """Builds a manifest that includes the given status results, if this is
101 101 a working copy context. For non-working copy contexts, it just returns
102 102 the normal manifest."""
103 103 return self.manifest()
104 104
105 105 def _matchstatus(self, other, match):
106 106 """This internal method provides a way for child objects to override the
107 107 match operator.
108 108 """
109 109 return match
110 110
111 111 def _buildstatus(self, other, s, match, listignored, listclean,
112 112 listunknown):
113 113 """build a status with respect to another context"""
114 114 # Load earliest manifest first for caching reasons. More specifically,
115 115 # if you have revisions 1000 and 1001, 1001 is probably stored as a
116 116 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
117 117 # 1000 and cache it so that when you read 1001, we just need to apply a
118 118 # delta to what's in the cache. So that's one full reconstruction + one
119 119 # delta application.
120 120 mf2 = None
121 121 if self.rev() is not None and self.rev() < other.rev():
122 122 mf2 = self._buildstatusmanifest(s)
123 123 mf1 = other._buildstatusmanifest(s)
124 124 if mf2 is None:
125 125 mf2 = self._buildstatusmanifest(s)
126 126
127 127 modified, added = [], []
128 128 removed = []
129 129 clean = []
130 130 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
131 131 deletedset = set(deleted)
132 132 d = mf1.diff(mf2, match=match, clean=listclean)
133 133 for fn, value in d.iteritems():
134 134 if fn in deletedset:
135 135 continue
136 136 if value is None:
137 137 clean.append(fn)
138 138 continue
139 139 (node1, flag1), (node2, flag2) = value
140 140 if node1 is None:
141 141 added.append(fn)
142 142 elif node2 is None:
143 143 removed.append(fn)
144 144 elif flag1 != flag2:
145 145 modified.append(fn)
146 146 elif node2 not in wdirnodes:
147 147 # When comparing files between two commits, we save time by
148 148 # not comparing the file contents when the nodeids differ.
149 149 # Note that this means we incorrectly report a reverted change
150 150 # to a file as a modification.
151 151 modified.append(fn)
152 152 elif self[fn].cmp(other[fn]):
153 153 modified.append(fn)
154 154 else:
155 155 clean.append(fn)
156 156
157 157 if removed:
158 158 # need to filter files if they are already reported as removed
159 159 unknown = [fn for fn in unknown if fn not in mf1 and
160 160 (not match or match(fn))]
161 161 ignored = [fn for fn in ignored if fn not in mf1 and
162 162 (not match or match(fn))]
163 163 # if they're deleted, don't report them as removed
164 164 removed = [fn for fn in removed if fn not in deletedset]
165 165
166 166 return scmutil.status(modified, added, removed, deleted, unknown,
167 167 ignored, clean)
168 168
169 169 @propertycache
170 170 def substate(self):
171 171 return subrepo.state(self, self._repo.ui)
172 172
173 173 def subrev(self, subpath):
174 174 return self.substate[subpath][1]
175 175
176 176 def rev(self):
177 177 return self._rev
178 178 def node(self):
179 179 return self._node
180 180 def hex(self):
181 181 return hex(self.node())
182 182 def manifest(self):
183 183 return self._manifest
184 184 def manifestctx(self):
185 185 return self._manifestctx
186 186 def repo(self):
187 187 return self._repo
188 188 def phasestr(self):
189 189 return phases.phasenames[self.phase()]
190 190 def mutable(self):
191 191 return self.phase() > phases.public
192 192
193 193 def getfileset(self, expr):
194 194 return fileset.getfileset(self, expr)
195 195
196 196 def obsolete(self):
197 197 """True if the changeset is obsolete"""
198 198 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
199 199
200 200 def extinct(self):
201 201 """True if the changeset is extinct"""
202 202 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
203 203
204 204 def unstable(self):
205 205 msg = ("'context.unstable' is deprecated, "
206 206 "use 'context.orphan'")
207 207 self._repo.ui.deprecwarn(msg, '4.4')
208 208 return self.orphan()
209 209
210 210 def orphan(self):
211 211 """True if the changeset is not obsolete but it's ancestor are"""
212 212 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
213 213
214 214 def bumped(self):
215 215 msg = ("'context.bumped' is deprecated, "
216 216 "use 'context.phasedivergent'")
217 217 self._repo.ui.deprecwarn(msg, '4.4')
218 218 return self.phasedivergent()
219 219
220 220 def phasedivergent(self):
221 221 """True if the changeset try to be a successor of a public changeset
222 222
223 223 Only non-public and non-obsolete changesets may be bumped.
224 224 """
225 225 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
226 226
227 227 def divergent(self):
228 228 msg = ("'context.divergent' is deprecated, "
229 229 "use 'context.contentdivergent'")
230 230 self._repo.ui.deprecwarn(msg, '4.4')
231 231 return self.contentdivergent()
232 232
233 233 def contentdivergent(self):
234 234 """Is a successors of a changeset with multiple possible successors set
235 235
236 236 Only non-public and non-obsolete changesets may be divergent.
237 237 """
238 238 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
239 239
240 240 def troubled(self):
241 241 msg = ("'context.troubled' is deprecated, "
242 242 "use 'context.isunstable'")
243 243 self._repo.ui.deprecwarn(msg, '4.4')
244 244 return self.isunstable()
245 245
246 246 def isunstable(self):
247 247 """True if the changeset is either unstable, bumped or divergent"""
248 248 return self.orphan() or self.phasedivergent() or self.contentdivergent()
249 249
250 250 def troubles(self):
251 251 """Keep the old version around in order to avoid breaking extensions
252 252 about different return values.
253 253 """
254 254 msg = ("'context.troubles' is deprecated, "
255 255 "use 'context.instabilities'")
256 256 self._repo.ui.deprecwarn(msg, '4.4')
257 257
258 258 troubles = []
259 259 if self.orphan():
260 260 troubles.append('orphan')
261 261 if self.phasedivergent():
262 262 troubles.append('bumped')
263 263 if self.contentdivergent():
264 264 troubles.append('divergent')
265 265 return troubles
266 266
267 267 def instabilities(self):
268 268 """return the list of instabilities affecting this changeset.
269 269
270 270 Instabilities are returned as strings. possible values are:
271 271 - orphan,
272 272 - phase-divergent,
273 273 - content-divergent.
274 274 """
275 275 instabilities = []
276 276 if self.orphan():
277 277 instabilities.append('orphan')
278 278 if self.phasedivergent():
279 279 instabilities.append('phase-divergent')
280 280 if self.contentdivergent():
281 281 instabilities.append('content-divergent')
282 282 return instabilities
283 283
284 284 def parents(self):
285 285 """return contexts for each parent changeset"""
286 286 return self._parents
287 287
288 288 def p1(self):
289 289 return self._parents[0]
290 290
291 291 def p2(self):
292 292 parents = self._parents
293 293 if len(parents) == 2:
294 294 return parents[1]
295 295 return changectx(self._repo, nullrev)
296 296
297 297 def _fileinfo(self, path):
298 298 if r'_manifest' in self.__dict__:
299 299 try:
300 300 return self._manifest[path], self._manifest.flags(path)
301 301 except KeyError:
302 302 raise error.ManifestLookupError(self._node, path,
303 303 _('not found in manifest'))
304 304 if r'_manifestdelta' in self.__dict__ or path in self.files():
305 305 if path in self._manifestdelta:
306 306 return (self._manifestdelta[path],
307 307 self._manifestdelta.flags(path))
308 308 mfl = self._repo.manifestlog
309 309 try:
310 310 node, flag = mfl[self._changeset.manifest].find(path)
311 311 except KeyError:
312 312 raise error.ManifestLookupError(self._node, path,
313 313 _('not found in manifest'))
314 314
315 315 return node, flag
316 316
317 317 def filenode(self, path):
318 318 return self._fileinfo(path)[0]
319 319
320 320 def flags(self, path):
321 321 try:
322 322 return self._fileinfo(path)[1]
323 323 except error.LookupError:
324 324 return ''
325 325
326 326 def sub(self, path, allowcreate=True):
327 327 '''return a subrepo for the stored revision of path, never wdir()'''
328 328 return subrepo.subrepo(self, path, allowcreate=allowcreate)
329 329
330 330 def nullsub(self, path, pctx):
331 331 return subrepo.nullsubrepo(self, path, pctx)
332 332
333 333 def workingsub(self, path):
334 334 '''return a subrepo for the stored revision, or wdir if this is a wdir
335 335 context.
336 336 '''
337 337 return subrepo.subrepo(self, path, allowwdir=True)
338 338
339 339 def match(self, pats=None, include=None, exclude=None, default='glob',
340 340 listsubrepos=False, badfn=None):
341 341 r = self._repo
342 342 return matchmod.match(r.root, r.getcwd(), pats,
343 343 include, exclude, default,
344 344 auditor=r.nofsauditor, ctx=self,
345 345 listsubrepos=listsubrepos, badfn=badfn)
346 346
347 347 def diff(self, ctx2=None, match=None, **opts):
348 348 """Returns a diff generator for the given contexts and matcher"""
349 349 if ctx2 is None:
350 350 ctx2 = self.p1()
351 351 if ctx2 is not None:
352 352 ctx2 = self._repo[ctx2]
353 353 diffopts = patch.diffopts(self._repo.ui, opts)
354 354 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
355 355
356 356 def dirs(self):
357 357 return self._manifest.dirs()
358 358
359 359 def hasdir(self, dir):
360 360 return self._manifest.hasdir(dir)
361 361
362 362 def status(self, other=None, match=None, listignored=False,
363 363 listclean=False, listunknown=False, listsubrepos=False):
364 364 """return status of files between two nodes or node and working
365 365 directory.
366 366
367 367 If other is None, compare this node with working directory.
368 368
369 369 returns (modified, added, removed, deleted, unknown, ignored, clean)
370 370 """
371 371
372 372 ctx1 = self
373 373 ctx2 = self._repo[other]
374 374
375 375 # This next code block is, admittedly, fragile logic that tests for
376 376 # reversing the contexts and wouldn't need to exist if it weren't for
377 377 # the fast (and common) code path of comparing the working directory
378 378 # with its first parent.
379 379 #
380 380 # What we're aiming for here is the ability to call:
381 381 #
382 382 # workingctx.status(parentctx)
383 383 #
384 384 # If we always built the manifest for each context and compared those,
385 385 # then we'd be done. But the special case of the above call means we
386 386 # just copy the manifest of the parent.
387 387 reversed = False
388 388 if (not isinstance(ctx1, changectx)
389 389 and isinstance(ctx2, changectx)):
390 390 reversed = True
391 391 ctx1, ctx2 = ctx2, ctx1
392 392
393 393 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
394 394 match = ctx2._matchstatus(ctx1, match)
395 395 r = scmutil.status([], [], [], [], [], [], [])
396 396 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
397 397 listunknown)
398 398
399 399 if reversed:
400 400 # Reverse added and removed. Clear deleted, unknown and ignored as
401 401 # these make no sense to reverse.
402 402 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
403 403 r.clean)
404 404
405 405 if listsubrepos:
406 406 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
407 407 try:
408 408 rev2 = ctx2.subrev(subpath)
409 409 except KeyError:
410 410 # A subrepo that existed in node1 was deleted between
411 411 # node1 and node2 (inclusive). Thus, ctx2's substate
412 412 # won't contain that subpath. The best we can do ignore it.
413 413 rev2 = None
414 414 submatch = matchmod.subdirmatcher(subpath, match)
415 415 s = sub.status(rev2, match=submatch, ignored=listignored,
416 416 clean=listclean, unknown=listunknown,
417 417 listsubrepos=True)
418 418 for rfiles, sfiles in zip(r, s):
419 419 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
420 420
421 421 for l in r:
422 422 l.sort()
423 423
424 424 return r
425 425
426 426 def _filterederror(repo, changeid):
427 427 """build an exception to be raised about a filtered changeid
428 428
429 429 This is extracted in a function to help extensions (eg: evolve) to
430 430 experiment with various message variants."""
431 431 if repo.filtername.startswith('visible'):
432 432 msg = _("hidden revision '%s'") % changeid
433 433 hint = _('use --hidden to access hidden revisions')
434 434 return error.FilteredRepoLookupError(msg, hint=hint)
435 435 msg = _("filtered revision '%s' (not in '%s' subset)")
436 436 msg %= (changeid, repo.filtername)
437 437 return error.FilteredRepoLookupError(msg)
438 438
439 439 class changectx(basectx):
440 440 """A changecontext object makes access to data related to a particular
441 441 changeset convenient. It represents a read-only context already present in
442 442 the repo."""
443 443 def __init__(self, repo, changeid=''):
444 444 """changeid is a revision number, node, or tag"""
445 445
446 446 # since basectx.__new__ already took care of copying the object, we
447 447 # don't need to do anything in __init__, so we just exit here
448 448 if isinstance(changeid, basectx):
449 449 return
450 450
451 451 if changeid == '':
452 452 changeid = '.'
453 453 self._repo = repo
454 454
455 455 try:
456 456 if isinstance(changeid, int):
457 457 self._node = repo.changelog.node(changeid)
458 458 self._rev = changeid
459 459 return
460 460 if not pycompat.ispy3 and isinstance(changeid, long):
461 461 changeid = str(changeid)
462 462 if changeid == 'null':
463 463 self._node = nullid
464 464 self._rev = nullrev
465 465 return
466 466 if changeid == 'tip':
467 467 self._node = repo.changelog.tip()
468 468 self._rev = repo.changelog.rev(self._node)
469 469 return
470 470 if changeid == '.' or changeid == repo.dirstate.p1():
471 471 # this is a hack to delay/avoid loading obsmarkers
472 472 # when we know that '.' won't be hidden
473 473 self._node = repo.dirstate.p1()
474 474 self._rev = repo.unfiltered().changelog.rev(self._node)
475 475 return
476 476 if len(changeid) == 20:
477 477 try:
478 478 self._node = changeid
479 479 self._rev = repo.changelog.rev(changeid)
480 480 return
481 481 except error.FilteredRepoLookupError:
482 482 raise
483 483 except LookupError:
484 484 pass
485 485
486 486 try:
487 487 r = int(changeid)
488 488 if '%d' % r != changeid:
489 489 raise ValueError
490 490 l = len(repo.changelog)
491 491 if r < 0:
492 492 r += l
493 493 if r < 0 or r >= l and r != wdirrev:
494 494 raise ValueError
495 495 self._rev = r
496 496 self._node = repo.changelog.node(r)
497 497 return
498 498 except error.FilteredIndexError:
499 499 raise
500 500 except (ValueError, OverflowError, IndexError):
501 501 pass
502 502
503 503 if len(changeid) == 40:
504 504 try:
505 505 self._node = bin(changeid)
506 506 self._rev = repo.changelog.rev(self._node)
507 507 return
508 508 except error.FilteredLookupError:
509 509 raise
510 510 except (TypeError, LookupError):
511 511 pass
512 512
513 513 # lookup bookmarks through the name interface
514 514 try:
515 515 self._node = repo.names.singlenode(repo, changeid)
516 516 self._rev = repo.changelog.rev(self._node)
517 517 return
518 518 except KeyError:
519 519 pass
520 520 except error.FilteredRepoLookupError:
521 521 raise
522 522 except error.RepoLookupError:
523 523 pass
524 524
525 525 self._node = repo.unfiltered().changelog._partialmatch(changeid)
526 526 if self._node is not None:
527 527 self._rev = repo.changelog.rev(self._node)
528 528 return
529 529
530 530 # lookup failed
531 531 # check if it might have come from damaged dirstate
532 532 #
533 533 # XXX we could avoid the unfiltered if we had a recognizable
534 534 # exception for filtered changeset access
535 535 if changeid in repo.unfiltered().dirstate.parents():
536 536 msg = _("working directory has unknown parent '%s'!")
537 537 raise error.Abort(msg % short(changeid))
538 538 try:
539 539 if len(changeid) == 20 and nonascii(changeid):
540 540 changeid = hex(changeid)
541 541 except TypeError:
542 542 pass
543 543 except (error.FilteredIndexError, error.FilteredLookupError,
544 544 error.FilteredRepoLookupError):
545 545 raise _filterederror(repo, changeid)
546 546 except IndexError:
547 547 pass
548 548 raise error.RepoLookupError(
549 549 _("unknown revision '%s'") % changeid)
550 550
551 551 def __hash__(self):
552 552 try:
553 553 return hash(self._rev)
554 554 except AttributeError:
555 555 return id(self)
556 556
557 557 def __nonzero__(self):
558 558 return self._rev != nullrev
559 559
560 560 __bool__ = __nonzero__
561 561
562 562 @propertycache
563 563 def _changeset(self):
564 564 return self._repo.changelog.changelogrevision(self.rev())
565 565
566 566 @propertycache
567 567 def _manifest(self):
568 568 return self._manifestctx.read()
569 569
570 570 @property
571 571 def _manifestctx(self):
572 572 return self._repo.manifestlog[self._changeset.manifest]
573 573
574 574 @propertycache
575 575 def _manifestdelta(self):
576 576 return self._manifestctx.readdelta()
577 577
578 578 @propertycache
579 579 def _parents(self):
580 580 repo = self._repo
581 581 p1, p2 = repo.changelog.parentrevs(self._rev)
582 582 if p2 == nullrev:
583 583 return [changectx(repo, p1)]
584 584 return [changectx(repo, p1), changectx(repo, p2)]
585 585
586 586 def changeset(self):
587 587 c = self._changeset
588 588 return (
589 589 c.manifest,
590 590 c.user,
591 591 c.date,
592 592 c.files,
593 593 c.description,
594 594 c.extra,
595 595 )
596 596 def manifestnode(self):
597 597 return self._changeset.manifest
598 598
599 599 def user(self):
600 600 return self._changeset.user
601 601 def date(self):
602 602 return self._changeset.date
603 603 def files(self):
604 604 return self._changeset.files
605 605 def description(self):
606 606 return self._changeset.description
607 607 def branch(self):
608 608 return encoding.tolocal(self._changeset.extra.get("branch"))
609 609 def closesbranch(self):
610 610 return 'close' in self._changeset.extra
611 611 def extra(self):
612 612 return self._changeset.extra
613 613 def tags(self):
614 614 return self._repo.nodetags(self._node)
615 615 def bookmarks(self):
616 616 return self._repo.nodebookmarks(self._node)
617 617 def phase(self):
618 618 return self._repo._phasecache.phase(self._repo, self._rev)
619 619 def hidden(self):
620 620 return self._rev in repoview.filterrevs(self._repo, 'visible')
621 621
622 622 def children(self):
623 623 """return contexts for each child changeset"""
624 624 c = self._repo.changelog.children(self._node)
625 625 return [changectx(self._repo, x) for x in c]
626 626
627 627 def ancestors(self):
628 628 for a in self._repo.changelog.ancestors([self._rev]):
629 629 yield changectx(self._repo, a)
630 630
631 631 def descendants(self):
632 632 for d in self._repo.changelog.descendants([self._rev]):
633 633 yield changectx(self._repo, d)
634 634
635 635 def filectx(self, path, fileid=None, filelog=None):
636 636 """get a file context from this changeset"""
637 637 if fileid is None:
638 638 fileid = self.filenode(path)
639 639 return filectx(self._repo, path, fileid=fileid,
640 640 changectx=self, filelog=filelog)
641 641
642 642 def ancestor(self, c2, warn=False):
643 643 """return the "best" ancestor context of self and c2
644 644
645 645 If there are multiple candidates, it will show a message and check
646 646 merge.preferancestor configuration before falling back to the
647 647 revlog ancestor."""
648 648 # deal with workingctxs
649 649 n2 = c2._node
650 650 if n2 is None:
651 651 n2 = c2._parents[0]._node
652 652 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
653 653 if not cahs:
654 654 anc = nullid
655 655 elif len(cahs) == 1:
656 656 anc = cahs[0]
657 657 else:
658 658 # experimental config: merge.preferancestor
659 659 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
660 660 try:
661 661 ctx = changectx(self._repo, r)
662 662 except error.RepoLookupError:
663 663 continue
664 664 anc = ctx.node()
665 665 if anc in cahs:
666 666 break
667 667 else:
668 668 anc = self._repo.changelog.ancestor(self._node, n2)
669 669 if warn:
670 670 self._repo.ui.status(
671 671 (_("note: using %s as ancestor of %s and %s\n") %
672 672 (short(anc), short(self._node), short(n2))) +
673 673 ''.join(_(" alternatively, use --config "
674 674 "merge.preferancestor=%s\n") %
675 675 short(n) for n in sorted(cahs) if n != anc))
676 676 return changectx(self._repo, anc)
677 677
678 678 def descendant(self, other):
679 679 """True if other is descendant of this changeset"""
680 680 return self._repo.changelog.descendant(self._rev, other._rev)
681 681
682 682 def walk(self, match):
683 683 '''Generates matching file names.'''
684 684
685 685 # Wrap match.bad method to have message with nodeid
686 686 def bad(fn, msg):
687 687 # The manifest doesn't know about subrepos, so don't complain about
688 688 # paths into valid subrepos.
689 689 if any(fn == s or fn.startswith(s + '/')
690 690 for s in self.substate):
691 691 return
692 692 match.bad(fn, _('no such file in rev %s') % self)
693 693
694 694 m = matchmod.badmatch(match, bad)
695 695 return self._manifest.walk(m)
696 696
697 697 def matches(self, match):
698 698 return self.walk(match)
699 699
700 700 class basefilectx(object):
701 701 """A filecontext object represents the common logic for its children:
702 702 filectx: read-only access to a filerevision that is already present
703 703 in the repo,
704 704 workingfilectx: a filecontext that represents files from the working
705 705 directory,
706 706 memfilectx: a filecontext that represents files in-memory,
707 707 overlayfilectx: duplicate another filecontext with some fields overridden.
708 708 """
709 709 @propertycache
710 710 def _filelog(self):
711 711 return self._repo.file(self._path)
712 712
713 713 @propertycache
714 714 def _changeid(self):
715 715 if r'_changeid' in self.__dict__:
716 716 return self._changeid
717 717 elif r'_changectx' in self.__dict__:
718 718 return self._changectx.rev()
719 719 elif r'_descendantrev' in self.__dict__:
720 720 # this file context was created from a revision with a known
721 721 # descendant, we can (lazily) correct for linkrev aliases
722 722 return self._adjustlinkrev(self._descendantrev)
723 723 else:
724 724 return self._filelog.linkrev(self._filerev)
725 725
726 726 @propertycache
727 727 def _filenode(self):
728 728 if r'_fileid' in self.__dict__:
729 729 return self._filelog.lookup(self._fileid)
730 730 else:
731 731 return self._changectx.filenode(self._path)
732 732
733 733 @propertycache
734 734 def _filerev(self):
735 735 return self._filelog.rev(self._filenode)
736 736
737 737 @propertycache
738 738 def _repopath(self):
739 739 return self._path
740 740
741 741 def __nonzero__(self):
742 742 try:
743 743 self._filenode
744 744 return True
745 745 except error.LookupError:
746 746 # file is missing
747 747 return False
748 748
749 749 __bool__ = __nonzero__
750 750
751 751 def __bytes__(self):
752 752 try:
753 753 return "%s@%s" % (self.path(), self._changectx)
754 754 except error.LookupError:
755 755 return "%s@???" % self.path()
756 756
757 757 __str__ = encoding.strmethod(__bytes__)
758 758
759 759 def __repr__(self):
760 760 return "<%s %s>" % (type(self).__name__, str(self))
761 761
762 762 def __hash__(self):
763 763 try:
764 764 return hash((self._path, self._filenode))
765 765 except AttributeError:
766 766 return id(self)
767 767
768 768 def __eq__(self, other):
769 769 try:
770 770 return (type(self) == type(other) and self._path == other._path
771 771 and self._filenode == other._filenode)
772 772 except AttributeError:
773 773 return False
774 774
775 775 def __ne__(self, other):
776 776 return not (self == other)
777 777
778 778 def filerev(self):
779 779 return self._filerev
780 780 def filenode(self):
781 781 return self._filenode
782 782 @propertycache
783 783 def _flags(self):
784 784 return self._changectx.flags(self._path)
785 785 def flags(self):
786 786 return self._flags
787 787 def filelog(self):
788 788 return self._filelog
789 789 def rev(self):
790 790 return self._changeid
791 791 def linkrev(self):
792 792 return self._filelog.linkrev(self._filerev)
793 793 def node(self):
794 794 return self._changectx.node()
795 795 def hex(self):
796 796 return self._changectx.hex()
797 797 def user(self):
798 798 return self._changectx.user()
799 799 def date(self):
800 800 return self._changectx.date()
801 801 def files(self):
802 802 return self._changectx.files()
803 803 def description(self):
804 804 return self._changectx.description()
805 805 def branch(self):
806 806 return self._changectx.branch()
807 807 def extra(self):
808 808 return self._changectx.extra()
809 809 def phase(self):
810 810 return self._changectx.phase()
811 811 def phasestr(self):
812 812 return self._changectx.phasestr()
813 813 def manifest(self):
814 814 return self._changectx.manifest()
815 815 def changectx(self):
816 816 return self._changectx
817 817 def renamed(self):
818 818 return self._copied
819 819 def repo(self):
820 820 return self._repo
821 821 def size(self):
822 822 return len(self.data())
823 823
824 824 def path(self):
825 825 return self._path
826 826
827 827 def isbinary(self):
828 828 try:
829 829 return util.binary(self.data())
830 830 except IOError:
831 831 return False
832 832 def isexec(self):
833 833 return 'x' in self.flags()
834 834 def islink(self):
835 835 return 'l' in self.flags()
836 836
837 837 def isabsent(self):
838 838 """whether this filectx represents a file not in self._changectx
839 839
840 840 This is mainly for merge code to detect change/delete conflicts. This is
841 841 expected to be True for all subclasses of basectx."""
842 842 return False
843 843
844 844 _customcmp = False
845 845 def cmp(self, fctx):
846 846 """compare with other file context
847 847
848 848 returns True if different than fctx.
849 849 """
850 850 if fctx._customcmp:
851 851 return fctx.cmp(self)
852 852
853 853 if (fctx._filenode is None
854 854 and (self._repo._encodefilterpats
855 855 # if file data starts with '\1\n', empty metadata block is
856 856 # prepended, which adds 4 bytes to filelog.size().
857 857 or self.size() - 4 == fctx.size())
858 858 or self.size() == fctx.size()):
859 859 return self._filelog.cmp(self._filenode, fctx.data())
860 860
861 861 return True
862 862
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        # use the unfiltered changelog: linkrevs may point at filtered revs
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
908 908
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        # With no associated changeset there is nothing to adjust against,
        # so the raw linkrev is the best available answer.
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)
924 924
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
940 940
    def parents(self):
        """Return the parent filectxs of this file revision.

        Rename information, when present, replaces the first null parent.
        """
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            #   be replaced with the rename information. This parent is -always-
            #   the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        """Return the first parent filectx."""
        return self.parents()[0]

    def p2(self):
        """Return the second parent filectx, or a null filectx (fileid=-1)
        when there is only one parent."""
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
969 969
970 970 def annotate(self, follow=False, linenumber=False, skiprevs=None,
971 971 diffopts=None):
972 972 '''returns a list of tuples of ((ctx, number), line) for each line
973 973 in the file, where ctx is the filectx of the node where
974 974 that line was last changed; if linenumber parameter is true, number is
975 975 the line number at the first appearance in the managed file, otherwise,
976 976 number has a fixed value of False.
977 977 '''
978 978
979 979 def lines(text):
980 980 if text.endswith("\n"):
981 981 return text.count("\n")
982 982 return text.count("\n") + int(bool(text))
983 983
984 984 if linenumber:
985 985 def decorate(text, rev):
986 986 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
987 987 else:
988 988 def decorate(text, rev):
989 989 return ([(rev, False)] * lines(text), text)
990 990
991 991 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
992 992
993 993 def parents(f):
994 994 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
995 995 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
996 996 # from the topmost introrev (= srcrev) down to p.linkrev() if it
997 997 # isn't an ancestor of the srcrev.
998 998 f._changeid
999 999 pl = f.parents()
1000 1000
1001 1001 # Don't return renamed parents if we aren't following.
1002 1002 if not follow:
1003 1003 pl = [p for p in pl if p.path() == f.path()]
1004 1004
1005 1005 # renamed filectx won't have a filelog yet, so set it
1006 1006 # from the cache to save time
1007 1007 for p in pl:
1008 1008 if not '_filelog' in p.__dict__:
1009 1009 p._filelog = getlog(p.path())
1010 1010
1011 1011 return pl
1012 1012
1013 1013 # use linkrev to find the first changeset where self appeared
1014 1014 base = self
1015 1015 introrev = self.introrev()
1016 1016 if self.rev() != introrev:
1017 1017 base = self.filectx(self.filenode(), changeid=introrev)
1018 1018 if getattr(base, '_ancestrycontext', None) is None:
1019 1019 cl = self._repo.changelog
1020 1020 if introrev is None:
1021 1021 # wctx is not inclusive, but works because _ancestrycontext
1022 1022 # is used to test filelog revisions
1023 1023 ac = cl.ancestors([p.rev() for p in base.parents()],
1024 1024 inclusive=True)
1025 1025 else:
1026 1026 ac = cl.ancestors([introrev], inclusive=True)
1027 1027 base._ancestrycontext = ac
1028 1028
1029 1029 # This algorithm would prefer to be recursive, but Python is a
1030 1030 # bit recursion-hostile. Instead we do an iterative
1031 1031 # depth-first search.
1032 1032
1033 1033 # 1st DFS pre-calculates pcache and needed
1034 1034 visit = [base]
1035 1035 pcache = {}
1036 1036 needed = {base: 1}
1037 1037 while visit:
1038 1038 f = visit.pop()
1039 1039 if f in pcache:
1040 1040 continue
1041 1041 pl = parents(f)
1042 1042 pcache[f] = pl
1043 1043 for p in pl:
1044 1044 needed[p] = needed.get(p, 0) + 1
1045 1045 if p not in pcache:
1046 1046 visit.append(p)
1047 1047
1048 1048 # 2nd DFS does the actual annotate
1049 1049 visit[:] = [base]
1050 1050 hist = {}
1051 1051 while visit:
1052 1052 f = visit[-1]
1053 1053 if f in hist:
1054 1054 visit.pop()
1055 1055 continue
1056 1056
1057 1057 ready = True
1058 1058 pl = pcache[f]
1059 1059 for p in pl:
1060 1060 if p not in hist:
1061 1061 ready = False
1062 1062 visit.append(p)
1063 1063 if ready:
1064 1064 visit.pop()
1065 1065 curr = decorate(f.data(), f)
1066 1066 skipchild = False
1067 1067 if skiprevs is not None:
1068 1068 skipchild = f._changeid in skiprevs
1069 1069 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1070 1070 diffopts)
1071 1071 for p in pl:
1072 1072 if needed[p] == 1:
1073 1073 del hist[p]
1074 1074 del needed[p]
1075 1075 else:
1076 1076 needed[p] -= 1
1077 1077
1078 1078 hist[f] = curr
1079 1079 del pcache[f]
1080 1080
1081 1081 return zip(hist[base][0], hist[base][1].splitlines(True))
1082 1082
1083 1083 def ancestors(self, followfirst=False):
1084 1084 visit = {}
1085 1085 c = self
1086 1086 if followfirst:
1087 1087 cut = 1
1088 1088 else:
1089 1089 cut = None
1090 1090
1091 1091 while True:
1092 1092 for parent in c.parents()[:cut]:
1093 1093 visit[(parent.linkrev(), parent.filenode())] = parent
1094 1094 if not visit:
1095 1095 break
1096 1096 c = visit.pop(max(visit))
1097 1097 yield c
1098 1098
1099 1099 def decodeddata(self):
1100 1100 """Returns `data()` after running repository decoding filters.
1101 1101
1102 1102 This is often equivalent to how the data would be expressed on disk.
1103 1103 """
1104 1104 return self._repo.wwritedata(self.path(), self.data())
1105 1105
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    >>> oldfctx = 'old'
    >>> p1fctx, p2fctx, childfctx = 'p1', 'p2', 'c'
    >>> olddata = 'a\nb\n'
    >>> p1data = 'a\nb\nc\n'
    >>> p2data = 'a\nc\nd\n'
    >>> childdata = 'a\nb2\nc\nc2\nd\n'
    >>> diffopts = mdiff.diffopts()

    >>> def decorate(text, rev):
    ...     return ([(rev, i) for i in xrange(1, text.count('\n') + 1)], text)

    Basic usage:

    >>> oldann = decorate(olddata, oldfctx)
    >>> p1ann = decorate(p1data, p1fctx)
    >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
    >>> p1ann[0]
    [('old', 1), ('old', 2), ('p1', 3)]
    >>> p2ann = decorate(p2data, p2fctx)
    >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
    >>> p2ann[0]
    [('old', 1), ('p2', 2), ('p2', 3)]

    Test with multiple parents (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]

    Test with skipchild (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
    '''
    # Pair each parent's annotation data with its diff blocks against the
    # child's text.
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        # This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    for bk in xrange(b1, b2):
                        if child[0][bk][0] == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = parent[0][ak]
                else:
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk][0] == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = parent[0][ak]
    return child
1215 1215
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # At least one way of locating the file revision must be supplied.
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        """Return the raw revlog data, without flag processing."""
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content at this revision.

        Raises Abort on a censored node unless censor.policy is 'ignore',
        in which case an empty string is returned.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        # filelog-backed size, cheaper than len(self.data())
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                # Same file node in a parent: not a rename for this changeset.
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1321 1321
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        # Not yet committed: no revision number or node.
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
            if self._extra['branch'] == '':
                self._extra['branch'] = 'default'

    def __bytes__(self):
        # e.g. "abcdef012345+" — parent id plus a 'dirty' marker
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # Lazily computed when no explicit `changes` was passed to __init__.
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date lets tests pin the commit date
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        """Return the sorted list of modified, added and removed files."""
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        return []

    def bookmarks(self):
        # An uncommitted context carries the bookmarks of all its parents.
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """Return the flags ('', 'l', 'x') for *path*, or '' if unknown."""
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # parents first, then every ancestor changeset of the parents
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1518 1518
1519 1519 class workingctx(committablectx):
1520 1520 """A workingctx object makes access to data related to
1521 1521 the current working directory convenient.
1522 1522 date - any valid date string or (unixtime, offset), or None.
1523 1523 user - username string, or None.
1524 1524 extra - a dictionary of extra values, or None.
1525 1525 changes - a list of file lists as returned by localrepo.status()
1526 1526 or None to use the repository status.
1527 1527 """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        # iterate tracked files, skipping those marked removed ('r')
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        # tracked means: not unknown ('?') and not removed ('r')
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        """Return the hex form of the working-directory pseudo node id."""
        return hex(wdirid)

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            # single-parent working directory: drop the null second parent
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)
1555 1555
    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir: a second merge parent, a branch switch,
        # any status change, or (optionally) deleted files make it dirty
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))
1567 1567
    def add(self, list, prefix=""):
        """Schedule the files in *list* for addition; return rejected names."""
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    # warn, but still allow, very large files
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    # already added, merged or normal
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1602 1602
1603 1603 def forget(self, files, prefix=""):
1604 1604 with self._repo.wlock():
1605 1605 ds = self._repo.dirstate
1606 1606 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1607 1607 rejected = []
1608 1608 for f in files:
1609 1609 if f not in self._repo.dirstate:
1610 1610 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1611 1611 rejected.append(f)
1612 1612 elif self._repo.dirstate[f] != 'a':
1613 1613 self._repo.dirstate.remove(f)
1614 1614 else:
1615 1615 self._repo.dirstate.drop(f)
1616 1616 return rejected
1617 1617
    def undelete(self, list):
        """Restore removed files from a parent and mark them normal."""
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    # take the file from whichever parent has it
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)

    def copy(self, source, dest):
        """Record that *dest* is a copy of *source* in the dirstate."""
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """Build a matcher for working-directory file name matching."""
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)
1663 1663
1664 1664 def _filtersuspectsymlink(self, files):
1665 1665 if not files or self._repo.dirstate._checklink:
1666 1666 return files
1667 1667
1668 1668 # Symlink placeholders may get non-symlink-like contents
1669 1669 # via user error or dereferencing by NFS or Samba servers,
1670 1670 # so we filter out any placeholders that don't look like a
1671 1671 # symlink
1672 1672 sane = []
1673 1673 for f in files:
1674 1674 if self.flags(f) == 'l':
1675 1675 d = self[f].data()
1676 1676 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1677 1677 self._repo.ui.debug('ignoring suspect symlink placeholder'
1678 1678 ' "%s"\n' % f)
1679 1679 continue
1680 1680 sane.append(f)
1681 1681 return sane
1682 1682
1683 1683 def _checklookup(self, files):
1684 1684 # check for any possibly clean files
1685 1685 if not files:
1686 1686 return [], [], []
1687 1687
1688 1688 modified = []
1689 1689 deleted = []
1690 1690 fixup = []
1691 1691 pctx = self._parents[0]
1692 1692 # do a full compare of any files that might have changed
1693 1693 for f in sorted(files):
1694 1694 try:
1695 1695 # This will return True for a file that got replaced by a
1696 1696 # directory in the interim, but fixing that is pretty hard.
1697 1697 if (f not in pctx or self.flags(f) != pctx.flags(f)
1698 1698 or pctx[f].cmp(self[f])):
1699 1699 modified.append(f)
1700 1700 else:
1701 1701 fixup.append(f)
1702 1702 except (IOError, OSError):
1703 1703 # A file become inaccessible in between? Mark it as deleted,
1704 1704 # matching dirstate behavior (issue5584).
1705 1705 # The dirstate has more complex behavior around whether a
1706 1706 # missing file matches a directory, etc, but we don't need to
1707 1707 # bother with that: if f has made it to this point, we're sure
1708 1708 # it's in the dirstate.
1709 1709 deleted.append(f)
1710 1710
1711 1711 return modified, deleted, fixup
1712 1712
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean

        ``fixup`` names files that proved clean after a content comparison;
        they are marked normal in the dirstate so later status calls can
        skip the comparison. Any registered post-dirstate-status callbacks
        are then run with the final ``status``. The whole operation is
        best-effort: if the wlock can't be taken, it is silently skipped.
        """
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                # remember the dirstate's identity so we can detect a
                # concurrent rewrite after we take the lock
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                # dirstate update is opportunistic; don't block or fail
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1752 1752
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.

        Files the dirstate could not decide about (``cmp``) are resolved by
        content comparison via ``_checklookup``, and the dirstate is
        opportunistically updated for files found clean
        (``_poststatusfixup``). Returns a status object.
        '''
        listignored, listclean, listunknown = ignored, clean, unknown
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        # cmp: files that need a content comparison; s: the status so far
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            # files that compared equal are in fact clean
            if fixup and listclean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s
1784 1784
1785 1785 @propertycache
1786 1786 def _manifest(self):
1787 1787 """generate a manifest corresponding to the values in self._status
1788 1788
1789 1789 This reuse the file nodeid from parent, but we use special node
1790 1790 identifiers for added and modified files. This is used by manifests
1791 1791 merge to see that files are different and by update logic to avoid
1792 1792 deleting newly added files.
1793 1793 """
1794 1794 return self._buildstatusmanifest(self._status)
1795 1795
1796 1796 def _buildstatusmanifest(self, status):
1797 1797 """Builds a manifest that includes the given status results."""
1798 1798 parents = self.parents()
1799 1799
1800 1800 man = parents[0].manifest().copy()
1801 1801
1802 1802 ff = self._flagfunc
1803 1803 for i, l in ((addednodeid, status.added),
1804 1804 (modifiednodeid, status.modified)):
1805 1805 for f in l:
1806 1806 man[f] = i
1807 1807 try:
1808 1808 man.setflag(f, ff(f))
1809 1809 except OSError:
1810 1810 pass
1811 1811
1812 1812 for f in status.deleted + status.removed:
1813 1813 if f in man:
1814 1814 del man[f]
1815 1815
1816 1816 return man
1817 1817
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # note: the incoming ``s`` is discarded; status is recomputed
        # directly from the dirstate
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            # slow path: fall back to manifest comparison in the base class
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s
1837 1837
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.

        Note: this mutates and returns the passed-in match object rather
        than copying it.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match
1857 1857
    def markcommitted(self, node):
        """Run post-commit bookkeeping, then let sparse clean up after the
        commit as well."""
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)
1862 1862
class committablefilectx(basefilectx):
    """Shared behavior for file contexts that can be committed,
    e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # always linked to self._changectx, whether or not the file is
        # modified there
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        path = self._path
        filelog = self._filelog
        parentctxs = self._changectx._parents

        def nodefor(ctx):
            return ctx._manifest.get(path, nullid)

        renamed = self.renamed()
        if renamed:
            entries = [renamed + (None,)]
        else:
            entries = [(path, nodefor(parentctxs[0]), filelog)]
        for pctx in parentctxs[1:]:
            entries.append((path, nodefor(pctx), filelog))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in entries if n != nullid]

    def children(self):
        return []
1909 1909
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # default to a fresh workingctx when none was supplied to __init__
        return workingctx(self._repo)

    def data(self):
        """Return the file's contents, read from the working directory."""
        return self._repo.wread(self._path)
    def renamed(self):
        """Return (source path, source filenode) if this file was copied,
        or None if it was not."""
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        """Return the on-disk size (via lstat, so links aren't followed)."""
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        """Return (mtime, tz); if the file is missing on disk, fall back
        to the changectx's own date."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        """True if the path exists in the working directory."""
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        """True if the path exists, without following symlinks."""
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        """Run the repo's path auditor on this path."""
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        # a directory (but not a symlink to one) sitting where the file
        # should go blocks the write; remove it
        if wvfs.isdir(self._path) and not wvfs.islink(self._path):
            wvfs.removedirs(self._path)

    def setflags(self, l, x):
        """Apply symlink (``l``) / exec (``x``) flags to the on-disk file."""
        self._repo.wvfs.setflags(self._path, l, x)
1976 1976
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # deliberately skip workingctx.__init__ in the MRO: the status is
        # pinned to the supplied ``changes`` instead of being recomputed
        # from the dirstate
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # every manifest file this commit doesn't touch counts as clean
            # NOTE(review): the clean list is not filtered through ``match``
            # like the other lists — presumably intentional; confirm callers
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2012 2012
def makecachingfilectxfn(func):
    """Wrap ``func`` in a filectxfn that memoizes its result per path.

    util.cachefunc is unsuitable here because it would key the cache on
    every argument, creating a reference cycle through the repo and memctx
    arguments.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        try:
            return cache[path]
        except KeyError:
            result = func(repo, memctx, path)
            cache[path] = result
            return result

    return getfilectx
2028 2028
def memfilefromctx(ctx):
    """Return a filectxfn serving files out of ``ctx[path]``.

    This is a convenience helper for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # renamed() yields a (source, node) tuple, but only one parent is
        # tracked here, so keep just the source path
        renamed = fctx.renamed()
        copied = renamed[0] if renamed else None
        return memfilectx(repo, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied, memctx=memctx)

    return getfilectx
2047 2047
def memfilefrompatch(patchstore):
    """Return a filectxfn serving files out of a patchstore.

    This is a convenience helper for building a memctx based on a
    patchstore object.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            # the patch removed this file
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data,
                          islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)

    return getfilectx
2063 2063
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # normalize missing (None) parents to the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        # also accept a patchstore or a context-like store in place of a
        # callable filectxfn
        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # added files have no filelog parents
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # filectxfn returned a context: file exists in this revision
                modified.append(f)
            else:
                # filectxfn returned None: file was removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2186 2186
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        self._copied = (copied, nullid) if copied else None

    def data(self):
        """Return the in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
2219 2219
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            ctxmatch = lambda: True
        else:
            # ctxmatch/copiedmatch report whether the override equals the
            # original value; used below for the "reusable" test
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr in attrs:
                if util.safehasattr(originalfctx, attr):
                    setattr(self, attr, getattr(originalfctx, attr))

    def data(self):
        """Return file content, lazily computed (possibly overridden)."""
        return self._datafunc()
2290 2290
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        # strip originalctx and the remaining args before delegating to the
        # parent __new__, which only expects (cls, repo)
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        # pad to exactly two parents with the null revision
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        """Return the (reused) manifest node."""
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # file contents are unchanged; serve them from the original context
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2388
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path):
        # absolute or relative filesystem path of the file represented
        self._path = path

    def cmp(self, otherfilectx):
        """Return True if this file's content differs from ``otherfilectx``."""
        return self.data() != otherfilectx.data()

    def path(self):
        """Return the filesystem path this context wraps."""
        return self._path

    def flags(self):
        # arbitrary on-disk files carry no symlink/exec flag information
        return ''

    def data(self):
        """Return the raw file contents."""
        return util.readfile(self._path)

    def decodeddata(self):
        """Return the file contents with no filter/decoding applied."""
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        """Delete the file from disk."""
        util.unlink(self._path)

    def write(self, data, flags):
        """Overwrite the file with ``data``; flags are unsupported."""
        assert not flags
        # open in binary mode: data is raw bytes (reads above use "rb" /
        # util.readfile), and text mode would mangle line endings on Windows
        with open(self._path, "wb") as f:
            f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now