##// END OF EJS Templates
context: add overlayfilectx.cmp()...
Phil Cohen -
r34784:0c812885 default
parent child Browse files
Show More
@@ -1,2589 +1,2592
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import re
13 import re
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 addednodeid,
18 addednodeid,
19 bin,
19 bin,
20 hex,
20 hex,
21 modifiednodeid,
21 modifiednodeid,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 wdirid,
25 wdirid,
26 wdirnodes,
26 wdirnodes,
27 wdirrev,
27 wdirrev,
28 )
28 )
29 from .thirdparty import (
29 from .thirdparty import (
30 attr,
30 attr,
31 )
31 )
32 from . import (
32 from . import (
33 encoding,
33 encoding,
34 error,
34 error,
35 fileset,
35 fileset,
36 match as matchmod,
36 match as matchmod,
37 mdiff,
37 mdiff,
38 obsolete as obsmod,
38 obsolete as obsmod,
39 patch,
39 patch,
40 pathutil,
40 pathutil,
41 phases,
41 phases,
42 pycompat,
42 pycompat,
43 repoview,
43 repoview,
44 revlog,
44 revlog,
45 scmutil,
45 scmutil,
46 sparse,
46 sparse,
47 subrepo,
47 subrepo,
48 util,
48 util,
49 )
49 )
50
50
# shorthand for the lazily-computed, cached-attribute decorator used
# throughout this module
propertycache = util.propertycache

# matcher for any byte outside the visible-ASCII range \x21-\x7f; used
# below to detect a raw binary nodeid being passed where a revision
# string was expected (see changectx.__init__)
nonascii = re.compile(r'[^\x21-\x7f]').search
54
54
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # passing an existing context through returns it unchanged, so
        # repo[ctx] is cheap and idempotent
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __bytes__(self):
        # short hex form of the node, e.g. for display
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # contexts of different types never compare equal, even at the
        # same revision; AttributeError covers non-context `other`
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # membership is file presence in this context's manifest
        return key in self._manifest

    def __getitem__(self, key):
        # ctx[path] -> file context for path at this revision
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            # a None diff value means the file is unchanged (only emitted
            # when listclean was requested)
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # subrepo path -> (source, revision, kind) state for this context
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        """revision number (nullrev until a subclass assigns one)"""
        return self._rev
    def node(self):
        """binary nodeid (nullid until a subclass assigns one)"""
        return self._node
    def hex(self):
        """full hex form of the nodeid"""
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        """human-readable name of this changeset's phase"""
        return phases.phasenames[self.phase()]
    def mutable(self):
        """True if the changeset's phase is later than public"""
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        # deprecated alias kept for extensions; emits a 4.4 warning
        msg = ("'context.unstable' is deprecated, "
               "use 'context.orphan'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.orphan()

    def orphan(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def bumped(self):
        # deprecated alias kept for extensions; emits a 4.4 warning
        msg = ("'context.bumped' is deprecated, "
               "use 'context.phasedivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.phasedivergent()

    def phasedivergent(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def divergent(self):
        # deprecated alias kept for extensions; emits a 4.4 warning
        msg = ("'context.divergent' is deprecated, "
               "use 'context.contentdivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.contentdivergent()

    def contentdivergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def troubled(self):
        # deprecated alias kept for extensions; emits a 4.4 warning
        msg = ("'context.troubled' is deprecated, "
               "use 'context.isunstable'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.isunstable()

    def isunstable(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def troubles(self):
        """Keep the old version around in order to avoid breaking extensions
        about different return values.
        """
        msg = ("'context.troubles' is deprecated, "
               "use 'context.instabilities'")
        self._repo.ui.deprecwarn(msg, '4.4')

        troubles = []
        if self.orphan():
            troubles.append('orphan')
        if self.phasedivergent():
            troubles.append('bumped')
        if self.contentdivergent():
            troubles.append('divergent')
        return troubles

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        """first parent context"""
        return self._parents[0]

    def p2(self):
        """second parent context, or the null context if there is none"""
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        """return (filenode, flags) for path, raising ManifestLookupError
        if the path is not in this context's manifest"""
        # use the full manifest only if it is already loaded
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        # otherwise try the cheaper manifest delta when it can answer
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        # fall back to a targeted lookup in the stored manifest
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # missing files report empty flags rather than raising
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """build a matcher for these patterns, rooted at this context"""
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    # prefix subrepo results with their path
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
429
429
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    filtername = repo.filtername
    if not filtername.startswith('visible'):
        # some other filter (e.g. 'served') hid the revision
        msg = _("filtered revision '%s' (not in '%s' subset)")
        msg %= (changeid, filtername)
        return error.FilteredRepoLookupError(msg)
    # visibility filtering: the revision exists but is hidden
    hiddenmsg = _("hidden revision '%s'") % changeid
    hiddenhint = _('use --hidden to access hidden revisions')
    return error.FilteredRepoLookupError(hiddenmsg, hint=hiddenhint)
442
442
443 class changectx(basectx):
443 class changectx(basectx):
444 """A changecontext object makes access to data related to a particular
444 """A changecontext object makes access to data related to a particular
445 changeset convenient. It represents a read-only context already present in
445 changeset convenient. It represents a read-only context already present in
446 the repo."""
446 the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        # empty changeid means the first dirstate parent
        if changeid == '':
            changeid = '.'
        self._repo = repo

        # The cascade below tries progressively more expensive lookups;
        # each successful branch sets _node/_rev and returns. Filtered-*
        # errors are re-raised so the outer handler can build a
        # user-friendly message via _filterederror.
        try:
            if isinstance(changeid, int):
                # plain revision number
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if not pycompat.ispy3 and isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                # 20 bytes: possibly a binary nodeid
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                # revision number given as a string; negative counts
                # back from the end of the changelog
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                # 40 chars: possibly a full hex nodeid
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            # last resort: unambiguous hex-prefix match
            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # hexlify binary nodeids so the error message is printable
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)
554
554
    def __hash__(self):
        # hash on the revision number when the context has one; a context
        # whose lookup failed (no _rev set) falls back to object identity
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # a changectx is falsy only for the null revision
        return self._rev != nullrev

    # py3 spelling of the truthiness hook
    __bool__ = __nonzero__
565
565
    @propertycache
    def _changeset(self):
        # parsed changelog entry for this revision (lazily read, then cached)
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        # full manifest for this changeset, read through the manifest context
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        # manifest context looked up by the manifest node recorded in the
        # changeset; a plain property (not propertycache), so it is
        # re-fetched from the manifestlog on each access
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        # delta variant of the manifest read (see manifestctx.readdelta)
        return self._manifestctx.readdelta()
581
581
    @propertycache
    def _parents(self):
        """parent changectxs; a one-element list when p2 is the null rev"""
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]
589
589
590 def changeset(self):
590 def changeset(self):
591 c = self._changeset
591 c = self._changeset
592 return (
592 return (
593 c.manifest,
593 c.manifest,
594 c.user,
594 c.user,
595 c.date,
595 c.date,
596 c.files,
596 c.files,
597 c.description,
597 c.description,
598 c.extra,
598 c.extra,
599 )
599 )
600 def manifestnode(self):
600 def manifestnode(self):
601 return self._changeset.manifest
601 return self._changeset.manifest
602
602
    # simple accessors delegating to the parsed changelog entry
    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        # the branch name lives in the 'extra' dict and is converted to the
        # local encoding for display
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        return self._changeset.extra
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        # a revision is hidden when it is filtered out of the 'visible' view
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        # an on-disk changeset is never an in-memory overlay
        return False
628
628
629 def children(self):
629 def children(self):
630 """return contexts for each child changeset"""
630 """return contexts for each child changeset"""
631 c = self._repo.changelog.children(self._node)
631 c = self._repo.changelog.children(self._node)
632 return [changectx(self._repo, x) for x in c]
632 return [changectx(self._repo, x) for x in c]
633
633
634 def ancestors(self):
634 def ancestors(self):
635 for a in self._repo.changelog.ancestors([self._rev]):
635 for a in self._repo.changelog.ancestors([self._rev]):
636 yield changectx(self._repo, a)
636 yield changectx(self._repo, a)
637
637
638 def descendants(self):
638 def descendants(self):
639 for d in self._repo.changelog.descendants([self._rev]):
639 for d in self._repo.changelog.descendants([self._rev]):
640 yield changectx(self._repo, d)
640 yield changectx(self._repo, d)
641
641
    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            # resolve the file node from this changeset's manifest
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)
648
648
    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            # working context: compare against its first parent instead
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            # no common ancestor at all
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # several candidate ancestors: honor the user's preference first
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched: let the revlog decide
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)
684
684
    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)
688
688
    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        # matching a changeset is just walking its manifest
        return self.walk(match)
706
706
707 class basefilectx(object):
707 class basefilectx(object):
708 """A filecontext object represents the common logic for its children:
708 """A filecontext object represents the common logic for its children:
709 filectx: read-only access to a filerevision that is already present
709 filectx: read-only access to a filerevision that is already present
710 in the repo,
710 in the repo,
711 workingfilectx: a filecontext that represents files from the working
711 workingfilectx: a filecontext that represents files from the working
712 directory,
712 directory,
713 memfilectx: a filecontext that represents files in-memory,
713 memfilectx: a filecontext that represents files in-memory,
714 overlayfilectx: duplicate another filecontext with some fields overridden.
714 overlayfilectx: duplicate another filecontext with some fields overridden.
715 """
715 """
    @propertycache
    def _filelog(self):
        # revlog holding this file's history
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        """changelog revision this file context is associated with"""
        if r'_changeid' in self.__dict__:
            # explicitly provided at construction time
            return self._changeid
        elif r'_changectx' in self.__dict__:
            # derive from the associated changeset context
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            # fall back to the (possibly shadowed) raw linkrev
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            # a fileid was provided: resolve it through the filelog
            return self._filelog.lookup(self._fileid)
        else:
            # otherwise look the file up in the changeset's manifest
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        # filelog revision number for this file node
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        # repo-relative path (same as _path for this class)
        return self._path
747
747
    def __nonzero__(self):
        # truthy iff the file node can be resolved
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    # py3 spelling of the truthiness hook
    __bool__ = __nonzero__

    def __bytes__(self):
        # "path@changeset", or "path@???" when the changeset lookup fails
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)
765
765
    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        # hash on (path, filenode); falls back to object identity when the
        # file node cannot be resolved (consistent with __eq__ below)
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        # equal iff same concrete type, same path and same file node
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
784
784
    # simple accessors: filelog-level data first, then delegation to the
    # associated changeset context
    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        # changelog revision this file context is associated with
        return self._changeid
    def linkrev(self):
        # raw linkrev from the filelog; may be shadowed -- see introrev()
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        # copy/rename metadata recorded for this context at creation time
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())
830
830
    def path(self):
        return self._path

    def isbinary(self):
        # treat unreadable data as "not binary" rather than raising
        try:
            return util.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()
843
843
    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts.
        The base implementation always returns False.

        NOTE(review): the previous docstring said this is "expected to be
        True for all subclasses of basectx", which contradicts the False
        returned here -- presumably only absent-file contexts override it;
        confirm against the subclasses.
        """
        return False
850
850
    # set to True by file contexts that implement their own comparison
    # (the other side of a cmp() then delegates to them)
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            # the other context knows best how to run the comparison
            return fctx.cmp(self)

        # only pay the cost of reading data when the sizes could match:
        # either fctx has no file node (size can't be trusted), or the
        # sizes agree (possibly off by the 4-byte metadata marker)
        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        # sizes differ, so the contents must differ
        return True
869
869
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            # walk the ancestors looking for the changeset that actually
            # introduced this file node
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
915
915
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        # with no associated changeset (or when the linkrev already matches)
        # there is nothing to adjust against
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)
931
931
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
947
947
    def parents(self):
        """return parent filectxs, substituting copy source when renamed"""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        # drop null parents; each entry is (path, filenode, filelog)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
967
967
    def p1(self):
        # first parent filectx (rename-adjusted; see parents())
        return self.parents()[0]

    def p2(self):
        # second parent filectx; with a single parent, return a filectx for
        # the same path with fileid=-1 instead
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
976
976
977 def annotate(self, follow=False, linenumber=False, skiprevs=None,
977 def annotate(self, follow=False, linenumber=False, skiprevs=None,
978 diffopts=None):
978 diffopts=None):
979 '''returns a list of tuples of ((ctx, number), line) for each line
979 '''returns a list of tuples of ((ctx, number), line) for each line
980 in the file, where ctx is the filectx of the node where
980 in the file, where ctx is the filectx of the node where
981 that line was last changed; if linenumber parameter is true, number is
981 that line was last changed; if linenumber parameter is true, number is
982 the line number at the first appearance in the managed file, otherwise,
982 the line number at the first appearance in the managed file, otherwise,
983 number has a fixed value of False.
983 number has a fixed value of False.
984 '''
984 '''
985
985
986 def lines(text):
986 def lines(text):
987 if text.endswith("\n"):
987 if text.endswith("\n"):
988 return text.count("\n")
988 return text.count("\n")
989 return text.count("\n") + int(bool(text))
989 return text.count("\n") + int(bool(text))
990
990
991 if linenumber:
991 if linenumber:
992 def decorate(text, rev):
992 def decorate(text, rev):
993 return ([annotateline(fctx=rev, lineno=i)
993 return ([annotateline(fctx=rev, lineno=i)
994 for i in xrange(1, lines(text) + 1)], text)
994 for i in xrange(1, lines(text) + 1)], text)
995 else:
995 else:
996 def decorate(text, rev):
996 def decorate(text, rev):
997 return ([annotateline(fctx=rev)] * lines(text), text)
997 return ([annotateline(fctx=rev)] * lines(text), text)
998
998
999 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
999 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1000
1000
1001 def parents(f):
1001 def parents(f):
1002 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1002 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1003 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1003 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1004 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1004 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1005 # isn't an ancestor of the srcrev.
1005 # isn't an ancestor of the srcrev.
1006 f._changeid
1006 f._changeid
1007 pl = f.parents()
1007 pl = f.parents()
1008
1008
1009 # Don't return renamed parents if we aren't following.
1009 # Don't return renamed parents if we aren't following.
1010 if not follow:
1010 if not follow:
1011 pl = [p for p in pl if p.path() == f.path()]
1011 pl = [p for p in pl if p.path() == f.path()]
1012
1012
1013 # renamed filectx won't have a filelog yet, so set it
1013 # renamed filectx won't have a filelog yet, so set it
1014 # from the cache to save time
1014 # from the cache to save time
1015 for p in pl:
1015 for p in pl:
1016 if not '_filelog' in p.__dict__:
1016 if not '_filelog' in p.__dict__:
1017 p._filelog = getlog(p.path())
1017 p._filelog = getlog(p.path())
1018
1018
1019 return pl
1019 return pl
1020
1020
1021 # use linkrev to find the first changeset where self appeared
1021 # use linkrev to find the first changeset where self appeared
1022 base = self
1022 base = self
1023 introrev = self.introrev()
1023 introrev = self.introrev()
1024 if self.rev() != introrev:
1024 if self.rev() != introrev:
1025 base = self.filectx(self.filenode(), changeid=introrev)
1025 base = self.filectx(self.filenode(), changeid=introrev)
1026 if getattr(base, '_ancestrycontext', None) is None:
1026 if getattr(base, '_ancestrycontext', None) is None:
1027 cl = self._repo.changelog
1027 cl = self._repo.changelog
1028 if introrev is None:
1028 if introrev is None:
1029 # wctx is not inclusive, but works because _ancestrycontext
1029 # wctx is not inclusive, but works because _ancestrycontext
1030 # is used to test filelog revisions
1030 # is used to test filelog revisions
1031 ac = cl.ancestors([p.rev() for p in base.parents()],
1031 ac = cl.ancestors([p.rev() for p in base.parents()],
1032 inclusive=True)
1032 inclusive=True)
1033 else:
1033 else:
1034 ac = cl.ancestors([introrev], inclusive=True)
1034 ac = cl.ancestors([introrev], inclusive=True)
1035 base._ancestrycontext = ac
1035 base._ancestrycontext = ac
1036
1036
1037 # This algorithm would prefer to be recursive, but Python is a
1037 # This algorithm would prefer to be recursive, but Python is a
1038 # bit recursion-hostile. Instead we do an iterative
1038 # bit recursion-hostile. Instead we do an iterative
1039 # depth-first search.
1039 # depth-first search.
1040
1040
1041 # 1st DFS pre-calculates pcache and needed
1041 # 1st DFS pre-calculates pcache and needed
1042 visit = [base]
1042 visit = [base]
1043 pcache = {}
1043 pcache = {}
1044 needed = {base: 1}
1044 needed = {base: 1}
1045 while visit:
1045 while visit:
1046 f = visit.pop()
1046 f = visit.pop()
1047 if f in pcache:
1047 if f in pcache:
1048 continue
1048 continue
1049 pl = parents(f)
1049 pl = parents(f)
1050 pcache[f] = pl
1050 pcache[f] = pl
1051 for p in pl:
1051 for p in pl:
1052 needed[p] = needed.get(p, 0) + 1
1052 needed[p] = needed.get(p, 0) + 1
1053 if p not in pcache:
1053 if p not in pcache:
1054 visit.append(p)
1054 visit.append(p)
1055
1055
1056 # 2nd DFS does the actual annotate
1056 # 2nd DFS does the actual annotate
1057 visit[:] = [base]
1057 visit[:] = [base]
1058 hist = {}
1058 hist = {}
1059 while visit:
1059 while visit:
1060 f = visit[-1]
1060 f = visit[-1]
1061 if f in hist:
1061 if f in hist:
1062 visit.pop()
1062 visit.pop()
1063 continue
1063 continue
1064
1064
1065 ready = True
1065 ready = True
1066 pl = pcache[f]
1066 pl = pcache[f]
1067 for p in pl:
1067 for p in pl:
1068 if p not in hist:
1068 if p not in hist:
1069 ready = False
1069 ready = False
1070 visit.append(p)
1070 visit.append(p)
1071 if ready:
1071 if ready:
1072 visit.pop()
1072 visit.pop()
1073 curr = decorate(f.data(), f)
1073 curr = decorate(f.data(), f)
1074 skipchild = False
1074 skipchild = False
1075 if skiprevs is not None:
1075 if skiprevs is not None:
1076 skipchild = f._changeid in skiprevs
1076 skipchild = f._changeid in skiprevs
1077 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1077 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1078 diffopts)
1078 diffopts)
1079 for p in pl:
1079 for p in pl:
1080 if needed[p] == 1:
1080 if needed[p] == 1:
1081 del hist[p]
1081 del hist[p]
1082 del needed[p]
1082 del needed[p]
1083 else:
1083 else:
1084 needed[p] -= 1
1084 needed[p] -= 1
1085
1085
1086 hist[f] = curr
1086 hist[f] = curr
1087 del pcache[f]
1087 del pcache[f]
1088
1088
1089 return zip(hist[base][0], hist[base][1].splitlines(True))
1089 return zip(hist[base][0], hist[base][1].splitlines(True))
1090
1090
1091 def ancestors(self, followfirst=False):
1091 def ancestors(self, followfirst=False):
1092 visit = {}
1092 visit = {}
1093 c = self
1093 c = self
1094 if followfirst:
1094 if followfirst:
1095 cut = 1
1095 cut = 1
1096 else:
1096 else:
1097 cut = None
1097 cut = None
1098
1098
1099 while True:
1099 while True:
1100 for parent in c.parents()[:cut]:
1100 for parent in c.parents()[:cut]:
1101 visit[(parent.linkrev(), parent.filenode())] = parent
1101 visit[(parent.linkrev(), parent.filenode())] = parent
1102 if not visit:
1102 if not visit:
1103 break
1103 break
1104 c = visit.pop(max(visit))
1104 c = visit.pop(max(visit))
1105 yield c
1105 yield c
1106
1106
1107 def decodeddata(self):
1107 def decodeddata(self):
1108 """Returns `data()` after running repository decoding filters.
1108 """Returns `data()` after running repository decoding filters.
1109
1109
1110 This is often equivalent to how the data would be expressed on disk.
1110 This is often equivalent to how the data would be expressed on disk.
1111 """
1111 """
1112 return self._repo.wwritedata(self.path(), self.data())
1112 return self._repo.wwritedata(self.path(), self.data())
1113
1113
@attr.s(slots=True, frozen=True)
class annotateline(object):
    """Immutable record attached to each line by annotate."""
    # filectx that introduced (or is blamed for) this line
    fctx = attr.ib()
    # line number within fctx; default False predates a real value
    lineno = attr.ib(default=False)
    # Whether this annotation was the result of a skip-annotate.
    skip = attr.ib(default=False)
1120
1120
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    `parents` and `child` are (annotateline-list, text) pairs; the child pair
    is mutated in place and also returned.

    See test-annotate.py for unit tests.
    '''
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        # This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        # diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    for bk in xrange(b1, b2):
                        if child[0][bk].fctx == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = attr.evolve(parent[0][ak], skip=True)
                else:
                    # parent hunk shorter than child hunk; retry below
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk].fctx == childfctx:
                        # clamp to the parent's last line (repeat it)
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = attr.evolve(parent[0][ak], skip=True)
    return child
1181
1181
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of locating the revision must be supplied
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        # revision data without filelog flag processing applied
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                # identical file revision in a parent: not a copy here
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1287
1287
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        # user/date/status fall back to propertycaches below when unset
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # default status: ask the repo (working directory status)
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # an uncommitted ctx inherits the bookmarks of all its parents
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1488
1488
1489 class workingctx(committablectx):
1489 class workingctx(committablectx):
1490 """A workingctx object makes access to data related to
1490 """A workingctx object makes access to data related to
1491 the current working directory convenient.
1491 the current working directory convenient.
1492 date - any valid date string or (unixtime, offset), or None.
1492 date - any valid date string or (unixtime, offset), or None.
1493 user - username string, or None.
1493 user - username string, or None.
1494 extra - a dictionary of extra values, or None.
1494 extra - a dictionary of extra values, or None.
1495 changes - a list of file lists as returned by localrepo.status()
1495 changes - a list of file lists as returned by localrepo.status()
1496 or None to use the repository status.
1496 or None to use the repository status.
1497 """
1497 """
1498 def __init__(self, repo, text="", user=None, date=None, extra=None,
1498 def __init__(self, repo, text="", user=None, date=None, extra=None,
1499 changes=None):
1499 changes=None):
1500 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1500 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1501
1501
1502 def __iter__(self):
1502 def __iter__(self):
1503 d = self._repo.dirstate
1503 d = self._repo.dirstate
1504 for f in d:
1504 for f in d:
1505 if d[f] != 'r':
1505 if d[f] != 'r':
1506 yield f
1506 yield f
1507
1507
1508 def __contains__(self, key):
1508 def __contains__(self, key):
1509 return self._repo.dirstate[key] not in "?r"
1509 return self._repo.dirstate[key] not in "?r"
1510
1510
1511 def hex(self):
1511 def hex(self):
1512 return hex(wdirid)
1512 return hex(wdirid)
1513
1513
1514 @propertycache
1514 @propertycache
1515 def _parents(self):
1515 def _parents(self):
1516 p = self._repo.dirstate.parents()
1516 p = self._repo.dirstate.parents()
1517 if p[1] == nullid:
1517 if p[1] == nullid:
1518 p = p[:-1]
1518 p = p[:-1]
1519 return [changectx(self._repo, x) for x in p]
1519 return [changectx(self._repo, x) for x in p]
1520
1520
1521 def filectx(self, path, filelog=None):
1521 def filectx(self, path, filelog=None):
1522 """get a file context from the working directory"""
1522 """get a file context from the working directory"""
1523 return workingfilectx(self._repo, path, workingctx=self,
1523 return workingfilectx(self._repo, path, workingctx=self,
1524 filelog=filelog)
1524 filelog=filelog)
1525
1525
1526 def dirty(self, missing=False, merge=True, branch=True):
1526 def dirty(self, missing=False, merge=True, branch=True):
1527 "check whether a working directory is modified"
1527 "check whether a working directory is modified"
1528 # check subrepos first
1528 # check subrepos first
1529 for s in sorted(self.substate):
1529 for s in sorted(self.substate):
1530 if self.sub(s).dirty(missing=missing):
1530 if self.sub(s).dirty(missing=missing):
1531 return True
1531 return True
1532 # check current working dir
1532 # check current working dir
1533 return ((merge and self.p2()) or
1533 return ((merge and self.p2()) or
1534 (branch and self.branch() != self.p1().branch()) or
1534 (branch and self.branch() != self.p1().branch()) or
1535 self.modified() or self.added() or self.removed() or
1535 self.modified() or self.added() or self.removed() or
1536 (missing and self.deleted()))
1536 (missing and self.deleted()))
1537
1537
1538 def add(self, list, prefix=""):
1538 def add(self, list, prefix=""):
1539 with self._repo.wlock():
1539 with self._repo.wlock():
1540 ui, ds = self._repo.ui, self._repo.dirstate
1540 ui, ds = self._repo.ui, self._repo.dirstate
1541 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1541 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1542 rejected = []
1542 rejected = []
1543 lstat = self._repo.wvfs.lstat
1543 lstat = self._repo.wvfs.lstat
1544 for f in list:
1544 for f in list:
1545 # ds.pathto() returns an absolute file when this is invoked from
1545 # ds.pathto() returns an absolute file when this is invoked from
1546 # the keyword extension. That gets flagged as non-portable on
1546 # the keyword extension. That gets flagged as non-portable on
1547 # Windows, since it contains the drive letter and colon.
1547 # Windows, since it contains the drive letter and colon.
1548 scmutil.checkportable(ui, os.path.join(prefix, f))
1548 scmutil.checkportable(ui, os.path.join(prefix, f))
1549 try:
1549 try:
1550 st = lstat(f)
1550 st = lstat(f)
1551 except OSError:
1551 except OSError:
1552 ui.warn(_("%s does not exist!\n") % uipath(f))
1552 ui.warn(_("%s does not exist!\n") % uipath(f))
1553 rejected.append(f)
1553 rejected.append(f)
1554 continue
1554 continue
1555 if st.st_size > 10000000:
1555 if st.st_size > 10000000:
1556 ui.warn(_("%s: up to %d MB of RAM may be required "
1556 ui.warn(_("%s: up to %d MB of RAM may be required "
1557 "to manage this file\n"
1557 "to manage this file\n"
1558 "(use 'hg revert %s' to cancel the "
1558 "(use 'hg revert %s' to cancel the "
1559 "pending addition)\n")
1559 "pending addition)\n")
1560 % (f, 3 * st.st_size // 1000000, uipath(f)))
1560 % (f, 3 * st.st_size // 1000000, uipath(f)))
1561 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1561 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1562 ui.warn(_("%s not added: only files and symlinks "
1562 ui.warn(_("%s not added: only files and symlinks "
1563 "supported currently\n") % uipath(f))
1563 "supported currently\n") % uipath(f))
1564 rejected.append(f)
1564 rejected.append(f)
1565 elif ds[f] in 'amn':
1565 elif ds[f] in 'amn':
1566 ui.warn(_("%s already tracked!\n") % uipath(f))
1566 ui.warn(_("%s already tracked!\n") % uipath(f))
1567 elif ds[f] == 'r':
1567 elif ds[f] == 'r':
1568 ds.normallookup(f)
1568 ds.normallookup(f)
1569 else:
1569 else:
1570 ds.add(f)
1570 ds.add(f)
1571 return rejected
1571 return rejected
1572
1572
1573 def forget(self, files, prefix=""):
1573 def forget(self, files, prefix=""):
1574 with self._repo.wlock():
1574 with self._repo.wlock():
1575 ds = self._repo.dirstate
1575 ds = self._repo.dirstate
1576 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1576 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1577 rejected = []
1577 rejected = []
1578 for f in files:
1578 for f in files:
1579 if f not in self._repo.dirstate:
1579 if f not in self._repo.dirstate:
1580 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1580 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1581 rejected.append(f)
1581 rejected.append(f)
1582 elif self._repo.dirstate[f] != 'a':
1582 elif self._repo.dirstate[f] != 'a':
1583 self._repo.dirstate.remove(f)
1583 self._repo.dirstate.remove(f)
1584 else:
1584 else:
1585 self._repo.dirstate.drop(f)
1585 self._repo.dirstate.drop(f)
1586 return rejected
1586 return rejected
1587
1587
1588 def undelete(self, list):
1588 def undelete(self, list):
1589 pctxs = self.parents()
1589 pctxs = self.parents()
1590 with self._repo.wlock():
1590 with self._repo.wlock():
1591 ds = self._repo.dirstate
1591 ds = self._repo.dirstate
1592 for f in list:
1592 for f in list:
1593 if self._repo.dirstate[f] != 'r':
1593 if self._repo.dirstate[f] != 'r':
1594 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1594 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1595 else:
1595 else:
1596 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1596 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1597 t = fctx.data()
1597 t = fctx.data()
1598 self._repo.wwrite(f, t, fctx.flags())
1598 self._repo.wwrite(f, t, fctx.flags())
1599 self._repo.dirstate.normal(f)
1599 self._repo.dirstate.normal(f)
1600
1600
1601 def copy(self, source, dest):
1601 def copy(self, source, dest):
1602 try:
1602 try:
1603 st = self._repo.wvfs.lstat(dest)
1603 st = self._repo.wvfs.lstat(dest)
1604 except OSError as err:
1604 except OSError as err:
1605 if err.errno != errno.ENOENT:
1605 if err.errno != errno.ENOENT:
1606 raise
1606 raise
1607 self._repo.ui.warn(_("%s does not exist!\n")
1607 self._repo.ui.warn(_("%s does not exist!\n")
1608 % self._repo.dirstate.pathto(dest))
1608 % self._repo.dirstate.pathto(dest))
1609 return
1609 return
1610 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1610 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1611 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1611 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1612 "symbolic link\n")
1612 "symbolic link\n")
1613 % self._repo.dirstate.pathto(dest))
1613 % self._repo.dirstate.pathto(dest))
1614 else:
1614 else:
1615 with self._repo.wlock():
1615 with self._repo.wlock():
1616 if self._repo.dirstate[dest] in '?':
1616 if self._repo.dirstate[dest] in '?':
1617 self._repo.dirstate.add(dest)
1617 self._repo.dirstate.add(dest)
1618 elif self._repo.dirstate[dest] in 'r':
1618 elif self._repo.dirstate[dest] in 'r':
1619 self._repo.dirstate.normallookup(dest)
1619 self._repo.dirstate.normallookup(dest)
1620 self._repo.dirstate.copy(source, dest)
1620 self._repo.dirstate.copy(source, dest)
1621
1621
1622 def match(self, pats=None, include=None, exclude=None, default='glob',
1622 def match(self, pats=None, include=None, exclude=None, default='glob',
1623 listsubrepos=False, badfn=None):
1623 listsubrepos=False, badfn=None):
1624 r = self._repo
1624 r = self._repo
1625
1625
1626 # Only a case insensitive filesystem needs magic to translate user input
1626 # Only a case insensitive filesystem needs magic to translate user input
1627 # to actual case in the filesystem.
1627 # to actual case in the filesystem.
1628 icasefs = not util.fscasesensitive(r.root)
1628 icasefs = not util.fscasesensitive(r.root)
1629 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1629 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1630 default, auditor=r.auditor, ctx=self,
1630 default, auditor=r.auditor, ctx=self,
1631 listsubrepos=listsubrepos, badfn=badfn,
1631 listsubrepos=listsubrepos, badfn=badfn,
1632 icasefs=icasefs)
1632 icasefs=icasefs)
1633
1633
1634 def flushall(self):
1634 def flushall(self):
1635 pass # For overlayworkingfilectx compatibility.
1635 pass # For overlayworkingfilectx compatibility.
1636
1636
1637 def _filtersuspectsymlink(self, files):
1637 def _filtersuspectsymlink(self, files):
1638 if not files or self._repo.dirstate._checklink:
1638 if not files or self._repo.dirstate._checklink:
1639 return files
1639 return files
1640
1640
1641 # Symlink placeholders may get non-symlink-like contents
1641 # Symlink placeholders may get non-symlink-like contents
1642 # via user error or dereferencing by NFS or Samba servers,
1642 # via user error or dereferencing by NFS or Samba servers,
1643 # so we filter out any placeholders that don't look like a
1643 # so we filter out any placeholders that don't look like a
1644 # symlink
1644 # symlink
1645 sane = []
1645 sane = []
1646 for f in files:
1646 for f in files:
1647 if self.flags(f) == 'l':
1647 if self.flags(f) == 'l':
1648 d = self[f].data()
1648 d = self[f].data()
1649 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1649 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1650 self._repo.ui.debug('ignoring suspect symlink placeholder'
1650 self._repo.ui.debug('ignoring suspect symlink placeholder'
1651 ' "%s"\n' % f)
1651 ' "%s"\n' % f)
1652 continue
1652 continue
1653 sane.append(f)
1653 sane.append(f)
1654 return sane
1654 return sane
1655
1655
1656 def _checklookup(self, files):
1656 def _checklookup(self, files):
1657 # check for any possibly clean files
1657 # check for any possibly clean files
1658 if not files:
1658 if not files:
1659 return [], [], []
1659 return [], [], []
1660
1660
1661 modified = []
1661 modified = []
1662 deleted = []
1662 deleted = []
1663 fixup = []
1663 fixup = []
1664 pctx = self._parents[0]
1664 pctx = self._parents[0]
1665 # do a full compare of any files that might have changed
1665 # do a full compare of any files that might have changed
1666 for f in sorted(files):
1666 for f in sorted(files):
1667 try:
1667 try:
1668 # This will return True for a file that got replaced by a
1668 # This will return True for a file that got replaced by a
1669 # directory in the interim, but fixing that is pretty hard.
1669 # directory in the interim, but fixing that is pretty hard.
1670 if (f not in pctx or self.flags(f) != pctx.flags(f)
1670 if (f not in pctx or self.flags(f) != pctx.flags(f)
1671 or pctx[f].cmp(self[f])):
1671 or pctx[f].cmp(self[f])):
1672 modified.append(f)
1672 modified.append(f)
1673 else:
1673 else:
1674 fixup.append(f)
1674 fixup.append(f)
1675 except (IOError, OSError):
1675 except (IOError, OSError):
1676 # A file become inaccessible in between? Mark it as deleted,
1676 # A file become inaccessible in between? Mark it as deleted,
1677 # matching dirstate behavior (issue5584).
1677 # matching dirstate behavior (issue5584).
1678 # The dirstate has more complex behavior around whether a
1678 # The dirstate has more complex behavior around whether a
1679 # missing file matches a directory, etc, but we don't need to
1679 # missing file matches a directory, etc, but we don't need to
1680 # bother with that: if f has made it to this point, we're sure
1680 # bother with that: if f has made it to this point, we're sure
1681 # it's in the dirstate.
1681 # it's in the dirstate.
1682 deleted.append(f)
1682 deleted.append(f)
1683
1683
1684 return modified, deleted, fixup
1684 return modified, deleted, fixup
1685
1685
1686 def _poststatusfixup(self, status, fixup):
1686 def _poststatusfixup(self, status, fixup):
1687 """update dirstate for files that are actually clean"""
1687 """update dirstate for files that are actually clean"""
1688 poststatus = self._repo.postdsstatus()
1688 poststatus = self._repo.postdsstatus()
1689 if fixup or poststatus:
1689 if fixup or poststatus:
1690 try:
1690 try:
1691 oldid = self._repo.dirstate.identity()
1691 oldid = self._repo.dirstate.identity()
1692
1692
1693 # updating the dirstate is optional
1693 # updating the dirstate is optional
1694 # so we don't wait on the lock
1694 # so we don't wait on the lock
1695 # wlock can invalidate the dirstate, so cache normal _after_
1695 # wlock can invalidate the dirstate, so cache normal _after_
1696 # taking the lock
1696 # taking the lock
1697 with self._repo.wlock(False):
1697 with self._repo.wlock(False):
1698 if self._repo.dirstate.identity() == oldid:
1698 if self._repo.dirstate.identity() == oldid:
1699 if fixup:
1699 if fixup:
1700 normal = self._repo.dirstate.normal
1700 normal = self._repo.dirstate.normal
1701 for f in fixup:
1701 for f in fixup:
1702 normal(f)
1702 normal(f)
1703 # write changes out explicitly, because nesting
1703 # write changes out explicitly, because nesting
1704 # wlock at runtime may prevent 'wlock.release()'
1704 # wlock at runtime may prevent 'wlock.release()'
1705 # after this block from doing so for subsequent
1705 # after this block from doing so for subsequent
1706 # changing files
1706 # changing files
1707 tr = self._repo.currenttransaction()
1707 tr = self._repo.currenttransaction()
1708 self._repo.dirstate.write(tr)
1708 self._repo.dirstate.write(tr)
1709
1709
1710 if poststatus:
1710 if poststatus:
1711 for ps in poststatus:
1711 for ps in poststatus:
1712 ps(self, status)
1712 ps(self, status)
1713 else:
1713 else:
1714 # in this case, writing changes out breaks
1714 # in this case, writing changes out breaks
1715 # consistency, because .hg/dirstate was
1715 # consistency, because .hg/dirstate was
1716 # already changed simultaneously after last
1716 # already changed simultaneously after last
1717 # caching (see also issue5584 for detail)
1717 # caching (see also issue5584 for detail)
1718 self._repo.ui.debug('skip updating dirstate: '
1718 self._repo.ui.debug('skip updating dirstate: '
1719 'identity mismatch\n')
1719 'identity mismatch\n')
1720 except error.LockError:
1720 except error.LockError:
1721 pass
1721 pass
1722 finally:
1722 finally:
1723 # Even if the wlock couldn't be grabbed, clear out the list.
1723 # Even if the wlock couldn't be grabbed, clear out the list.
1724 self._repo.clearpostdsstatus()
1724 self._repo.clearpostdsstatus()
1725
1725
1726 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1726 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1727 '''Gets the status from the dirstate -- internal use only.'''
1727 '''Gets the status from the dirstate -- internal use only.'''
1728 subrepos = []
1728 subrepos = []
1729 if '.hgsub' in self:
1729 if '.hgsub' in self:
1730 subrepos = sorted(self.substate)
1730 subrepos = sorted(self.substate)
1731 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1731 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1732 clean=clean, unknown=unknown)
1732 clean=clean, unknown=unknown)
1733
1733
1734 # check for any possibly clean files
1734 # check for any possibly clean files
1735 fixup = []
1735 fixup = []
1736 if cmp:
1736 if cmp:
1737 modified2, deleted2, fixup = self._checklookup(cmp)
1737 modified2, deleted2, fixup = self._checklookup(cmp)
1738 s.modified.extend(modified2)
1738 s.modified.extend(modified2)
1739 s.deleted.extend(deleted2)
1739 s.deleted.extend(deleted2)
1740
1740
1741 if fixup and clean:
1741 if fixup and clean:
1742 s.clean.extend(fixup)
1742 s.clean.extend(fixup)
1743
1743
1744 self._poststatusfixup(s, fixup)
1744 self._poststatusfixup(s, fixup)
1745
1745
1746 if match.always():
1746 if match.always():
1747 # cache for performance
1747 # cache for performance
1748 if s.unknown or s.ignored or s.clean:
1748 if s.unknown or s.ignored or s.clean:
1749 # "_status" is cached with list*=False in the normal route
1749 # "_status" is cached with list*=False in the normal route
1750 self._status = scmutil.status(s.modified, s.added, s.removed,
1750 self._status = scmutil.status(s.modified, s.added, s.removed,
1751 s.deleted, [], [], [])
1751 s.deleted, [], [], [])
1752 else:
1752 else:
1753 self._status = s
1753 self._status = s
1754
1754
1755 return s
1755 return s
1756
1756
1757 @propertycache
1757 @propertycache
1758 def _manifest(self):
1758 def _manifest(self):
1759 """generate a manifest corresponding to the values in self._status
1759 """generate a manifest corresponding to the values in self._status
1760
1760
1761 This reuse the file nodeid from parent, but we use special node
1761 This reuse the file nodeid from parent, but we use special node
1762 identifiers for added and modified files. This is used by manifests
1762 identifiers for added and modified files. This is used by manifests
1763 merge to see that files are different and by update logic to avoid
1763 merge to see that files are different and by update logic to avoid
1764 deleting newly added files.
1764 deleting newly added files.
1765 """
1765 """
1766 return self._buildstatusmanifest(self._status)
1766 return self._buildstatusmanifest(self._status)
1767
1767
1768 def _buildstatusmanifest(self, status):
1768 def _buildstatusmanifest(self, status):
1769 """Builds a manifest that includes the given status results."""
1769 """Builds a manifest that includes the given status results."""
1770 parents = self.parents()
1770 parents = self.parents()
1771
1771
1772 man = parents[0].manifest().copy()
1772 man = parents[0].manifest().copy()
1773
1773
1774 ff = self._flagfunc
1774 ff = self._flagfunc
1775 for i, l in ((addednodeid, status.added),
1775 for i, l in ((addednodeid, status.added),
1776 (modifiednodeid, status.modified)):
1776 (modifiednodeid, status.modified)):
1777 for f in l:
1777 for f in l:
1778 man[f] = i
1778 man[f] = i
1779 try:
1779 try:
1780 man.setflag(f, ff(f))
1780 man.setflag(f, ff(f))
1781 except OSError:
1781 except OSError:
1782 pass
1782 pass
1783
1783
1784 for f in status.deleted + status.removed:
1784 for f in status.deleted + status.removed:
1785 if f in man:
1785 if f in man:
1786 del man[f]
1786 del man[f]
1787
1787
1788 return man
1788 return man
1789
1789
1790 def _buildstatus(self, other, s, match, listignored, listclean,
1790 def _buildstatus(self, other, s, match, listignored, listclean,
1791 listunknown):
1791 listunknown):
1792 """build a status with respect to another context
1792 """build a status with respect to another context
1793
1793
1794 This includes logic for maintaining the fast path of status when
1794 This includes logic for maintaining the fast path of status when
1795 comparing the working directory against its parent, which is to skip
1795 comparing the working directory against its parent, which is to skip
1796 building a new manifest if self (working directory) is not comparing
1796 building a new manifest if self (working directory) is not comparing
1797 against its parent (repo['.']).
1797 against its parent (repo['.']).
1798 """
1798 """
1799 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1799 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1800 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1800 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1801 # might have accidentally ended up with the entire contents of the file
1801 # might have accidentally ended up with the entire contents of the file
1802 # they are supposed to be linking to.
1802 # they are supposed to be linking to.
1803 s.modified[:] = self._filtersuspectsymlink(s.modified)
1803 s.modified[:] = self._filtersuspectsymlink(s.modified)
1804 if other != self._repo['.']:
1804 if other != self._repo['.']:
1805 s = super(workingctx, self)._buildstatus(other, s, match,
1805 s = super(workingctx, self)._buildstatus(other, s, match,
1806 listignored, listclean,
1806 listignored, listclean,
1807 listunknown)
1807 listunknown)
1808 return s
1808 return s
1809
1809
1810 def _matchstatus(self, other, match):
1810 def _matchstatus(self, other, match):
1811 """override the match method with a filter for directory patterns
1811 """override the match method with a filter for directory patterns
1812
1812
1813 We use inheritance to customize the match.bad method only in cases of
1813 We use inheritance to customize the match.bad method only in cases of
1814 workingctx since it belongs only to the working directory when
1814 workingctx since it belongs only to the working directory when
1815 comparing against the parent changeset.
1815 comparing against the parent changeset.
1816
1816
1817 If we aren't comparing against the working directory's parent, then we
1817 If we aren't comparing against the working directory's parent, then we
1818 just use the default match object sent to us.
1818 just use the default match object sent to us.
1819 """
1819 """
1820 if other != self._repo['.']:
1820 if other != self._repo['.']:
1821 def bad(f, msg):
1821 def bad(f, msg):
1822 # 'f' may be a directory pattern from 'match.files()',
1822 # 'f' may be a directory pattern from 'match.files()',
1823 # so 'f not in ctx1' is not enough
1823 # so 'f not in ctx1' is not enough
1824 if f not in other and not other.hasdir(f):
1824 if f not in other and not other.hasdir(f):
1825 self._repo.ui.warn('%s: %s\n' %
1825 self._repo.ui.warn('%s: %s\n' %
1826 (self._repo.dirstate.pathto(f), msg))
1826 (self._repo.dirstate.pathto(f), msg))
1827 match.bad = bad
1827 match.bad = bad
1828 return match
1828 return match
1829
1829
1830 def markcommitted(self, node):
1830 def markcommitted(self, node):
1831 super(workingctx, self).markcommitted(node)
1831 super(workingctx, self).markcommitted(node)
1832
1832
1833 sparse.aftercommit(self._repo, node)
1833 sparse.aftercommit(self._repo, node)
1834
1834
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        # filelog/ctx are optional; when absent, lazily derived elsewhere.
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # An uncommitted file context always "exists".
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # (source path, source filenode) from renamed(), no filelog yet.
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # Skip parents where the file does not exist (nullid).
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # Uncommitted files have no committed children.
        return []
1881
1881
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        """Read the file's current contents from the working directory."""
        return self._repo.wread(self._path)

    def renamed(self):
        """Return (source path, source filenode) if this file was copied,
        else None."""
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        """Return (mtime, tzoffset); falls back to the changectx date when
        the file is gone."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        # A directory in the way of the file must go.
        if wvfs.isdir(f) and not wvfs.islink(f):
            wvfs.rmtree(f, forcibly=True)
        # A file in the way of any ancestor directory must go too.
        for p in reversed(list(util.finddirs(f))):
            if wvfs.isfileorlink(p):
                wvfs.unlink(p)
                break

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1953
1953
1954 class overlayworkingctx(workingctx):
1954 class overlayworkingctx(workingctx):
1955 """Wraps another mutable context with a write-back cache that can be flushed
1955 """Wraps another mutable context with a write-back cache that can be flushed
1956 at a later time.
1956 at a later time.
1957
1957
1958 self._cache[path] maps to a dict with keys: {
1958 self._cache[path] maps to a dict with keys: {
1959 'exists': bool?
1959 'exists': bool?
1960 'date': date?
1960 'date': date?
1961 'data': str?
1961 'data': str?
1962 'flags': str?
1962 'flags': str?
1963 }
1963 }
1964 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1964 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1965 is `False`, the file was deleted.
1965 is `False`, the file was deleted.
1966 """
1966 """
1967
1967
1968 def __init__(self, repo, wrappedctx):
1968 def __init__(self, repo, wrappedctx):
1969 super(overlayworkingctx, self).__init__(repo)
1969 super(overlayworkingctx, self).__init__(repo)
1970 self._repo = repo
1970 self._repo = repo
1971 self._wrappedctx = wrappedctx
1971 self._wrappedctx = wrappedctx
1972 self._clean()
1972 self._clean()
1973
1973
1974 def data(self, path):
1974 def data(self, path):
1975 if self.isdirty(path):
1975 if self.isdirty(path):
1976 if self._cache[path]['exists']:
1976 if self._cache[path]['exists']:
1977 if self._cache[path]['data']:
1977 if self._cache[path]['data']:
1978 return self._cache[path]['data']
1978 return self._cache[path]['data']
1979 else:
1979 else:
1980 # Must fallback here, too, because we only set flags.
1980 # Must fallback here, too, because we only set flags.
1981 return self._wrappedctx[path].data()
1981 return self._wrappedctx[path].data()
1982 else:
1982 else:
1983 raise error.ProgrammingError("No such file or directory: %s" %
1983 raise error.ProgrammingError("No such file or directory: %s" %
1984 self._path)
1984 self._path)
1985 else:
1985 else:
1986 return self._wrappedctx[path].data()
1986 return self._wrappedctx[path].data()
1987
1987
1988 def isinmemory(self):
1988 def isinmemory(self):
1989 return True
1989 return True
1990
1990
1991 def filedate(self, path):
1991 def filedate(self, path):
1992 if self.isdirty(path):
1992 if self.isdirty(path):
1993 return self._cache[path]['date']
1993 return self._cache[path]['date']
1994 else:
1994 else:
1995 return self._wrappedctx[path].date()
1995 return self._wrappedctx[path].date()
1996
1996
1997 def flags(self, path):
1997 def flags(self, path):
1998 if self.isdirty(path):
1998 if self.isdirty(path):
1999 if self._cache[path]['exists']:
1999 if self._cache[path]['exists']:
2000 return self._cache[path]['flags']
2000 return self._cache[path]['flags']
2001 else:
2001 else:
2002 raise error.ProgrammingError("No such file or directory: %s" %
2002 raise error.ProgrammingError("No such file or directory: %s" %
2003 self._path)
2003 self._path)
2004 else:
2004 else:
2005 return self._wrappedctx[path].flags()
2005 return self._wrappedctx[path].flags()
2006
2006
2007 def write(self, path, data, flags=''):
2007 def write(self, path, data, flags=''):
2008 if data is None:
2008 if data is None:
2009 raise error.ProgrammingError("data must be non-None")
2009 raise error.ProgrammingError("data must be non-None")
2010 self._markdirty(path, exists=True, data=data, date=util.makedate(),
2010 self._markdirty(path, exists=True, data=data, date=util.makedate(),
2011 flags=flags)
2011 flags=flags)
2012
2012
2013 def setflags(self, path, l, x):
2013 def setflags(self, path, l, x):
2014 self._markdirty(path, exists=True, date=util.makedate(),
2014 self._markdirty(path, exists=True, date=util.makedate(),
2015 flags=(l and 'l' or '') + (x and 'x' or ''))
2015 flags=(l and 'l' or '') + (x and 'x' or ''))
2016
2016
2017 def remove(self, path):
2017 def remove(self, path):
2018 self._markdirty(path, exists=False)
2018 self._markdirty(path, exists=False)
2019
2019
2020 def exists(self, path):
2020 def exists(self, path):
2021 """exists behaves like `lexists`, but needs to follow symlinks and
2021 """exists behaves like `lexists`, but needs to follow symlinks and
2022 return False if they are broken.
2022 return False if they are broken.
2023 """
2023 """
2024 if self.isdirty(path):
2024 if self.isdirty(path):
2025 # If this path exists and is a symlink, "follow" it by calling
2025 # If this path exists and is a symlink, "follow" it by calling
2026 # exists on the destination path.
2026 # exists on the destination path.
2027 if (self._cache[path]['exists'] and
2027 if (self._cache[path]['exists'] and
2028 'l' in self._cache[path]['flags']):
2028 'l' in self._cache[path]['flags']):
2029 return self.exists(self._cache[path]['data'].strip())
2029 return self.exists(self._cache[path]['data'].strip())
2030 else:
2030 else:
2031 return self._cache[path]['exists']
2031 return self._cache[path]['exists']
2032 return self._wrappedctx[path].exists()
2032 return self._wrappedctx[path].exists()
2033
2033
2034 def lexists(self, path):
2034 def lexists(self, path):
2035 """lexists returns True if the path exists"""
2035 """lexists returns True if the path exists"""
2036 if self.isdirty(path):
2036 if self.isdirty(path):
2037 return self._cache[path]['exists']
2037 return self._cache[path]['exists']
2038 return self._wrappedctx[path].lexists()
2038 return self._wrappedctx[path].lexists()
2039
2039
2040 def size(self, path):
2040 def size(self, path):
2041 if self.isdirty(path):
2041 if self.isdirty(path):
2042 if self._cache[path]['exists']:
2042 if self._cache[path]['exists']:
2043 return len(self._cache[path]['data'])
2043 return len(self._cache[path]['data'])
2044 else:
2044 else:
2045 raise error.ProgrammingError("No such file or directory: %s" %
2045 raise error.ProgrammingError("No such file or directory: %s" %
2046 self._path)
2046 self._path)
2047 return self._wrappedctx[path].size()
2047 return self._wrappedctx[path].size()
2048
2048
2049 def flushall(self):
2049 def flushall(self):
2050 for path in self._writeorder:
2050 for path in self._writeorder:
2051 entry = self._cache[path]
2051 entry = self._cache[path]
2052 if entry['exists']:
2052 if entry['exists']:
2053 self._wrappedctx[path].clearunknown()
2053 self._wrappedctx[path].clearunknown()
2054 if entry['data'] is not None:
2054 if entry['data'] is not None:
2055 if entry['flags'] is None:
2055 if entry['flags'] is None:
2056 raise error.ProgrammingError('data set but not flags')
2056 raise error.ProgrammingError('data set but not flags')
2057 self._wrappedctx[path].write(
2057 self._wrappedctx[path].write(
2058 entry['data'],
2058 entry['data'],
2059 entry['flags'])
2059 entry['flags'])
2060 else:
2060 else:
2061 self._wrappedctx[path].setflags(
2061 self._wrappedctx[path].setflags(
2062 'l' in entry['flags'],
2062 'l' in entry['flags'],
2063 'x' in entry['flags'])
2063 'x' in entry['flags'])
2064 else:
2064 else:
2065 self._wrappedctx[path].remove(path)
2065 self._wrappedctx[path].remove(path)
2066 self._clean()
2066 self._clean()
2067
2067
2068 def isdirty(self, path):
2068 def isdirty(self, path):
2069 return path in self._cache
2069 return path in self._cache
2070
2070
2071 def _clean(self):
2071 def _clean(self):
2072 self._cache = {}
2072 self._cache = {}
2073 self._writeorder = []
2073 self._writeorder = []
2074
2074
2075 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2075 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2076 if path not in self._cache:
2076 if path not in self._cache:
2077 self._writeorder.append(path)
2077 self._writeorder.append(path)
2078
2078
2079 self._cache[path] = {
2079 self._cache[path] = {
2080 'exists': exists,
2080 'exists': exists,
2081 'data': data,
2081 'data': data,
2082 'date': date,
2082 'date': date,
2083 'flags': flags,
2083 'flags': flags,
2084 }
2084 }
2085
2085
2086 def filectx(self, path, filelog=None):
2086 def filectx(self, path, filelog=None):
2087 return overlayworkingfilectx(self._repo, path, parent=self,
2087 return overlayworkingfilectx(self._repo, path, parent=self,
2088 filelog=filelog)
2088 filelog=filelog)
2089
2089
2090 class overlayworkingfilectx(workingfilectx):
2090 class overlayworkingfilectx(workingfilectx):
2091 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2091 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2092 cache, which can be flushed through later by calling ``flush()``."""
2092 cache, which can be flushed through later by calling ``flush()``."""
2093
2093
2094 def __init__(self, repo, path, filelog=None, parent=None):
2094 def __init__(self, repo, path, filelog=None, parent=None):
2095 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2095 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2096 parent)
2096 parent)
2097 self._repo = repo
2097 self._repo = repo
2098 self._parent = parent
2098 self._parent = parent
2099 self._path = path
2099 self._path = path
2100
2100
2101 def cmp(self, fctx):
2102 return self.data() != fctx.data()
2103
2101 def ctx(self):
2104 def ctx(self):
2102 return self._parent
2105 return self._parent
2103
2106
2104 def data(self):
2107 def data(self):
2105 return self._parent.data(self._path)
2108 return self._parent.data(self._path)
2106
2109
2107 def date(self):
2110 def date(self):
2108 return self._parent.filedate(self._path)
2111 return self._parent.filedate(self._path)
2109
2112
2110 def exists(self):
2113 def exists(self):
2111 return self.lexists()
2114 return self.lexists()
2112
2115
2113 def lexists(self):
2116 def lexists(self):
2114 return self._parent.exists(self._path)
2117 return self._parent.exists(self._path)
2115
2118
2116 def renamed(self):
2119 def renamed(self):
2117 # Copies are currently tracked in the dirstate as before. Straight copy
2120 # Copies are currently tracked in the dirstate as before. Straight copy
2118 # from workingfilectx.
2121 # from workingfilectx.
2119 rp = self._repo.dirstate.copied(self._path)
2122 rp = self._repo.dirstate.copied(self._path)
2120 if not rp:
2123 if not rp:
2121 return None
2124 return None
2122 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
2125 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
2123
2126
2124 def size(self):
2127 def size(self):
2125 return self._parent.size(self._path)
2128 return self._parent.size(self._path)
2126
2129
2127 def audit(self):
2130 def audit(self):
2128 pass
2131 pass
2129
2132
2130 def flags(self):
2133 def flags(self):
2131 return self._parent.flags(self._path)
2134 return self._parent.flags(self._path)
2132
2135
2133 def setflags(self, islink, isexec):
2136 def setflags(self, islink, isexec):
2134 return self._parent.setflags(self._path, islink, isexec)
2137 return self._parent.setflags(self._path, islink, isexec)
2135
2138
2136 def write(self, data, flags, backgroundclose=False):
2139 def write(self, data, flags, backgroundclose=False):
2137 return self._parent.write(self._path, data, flags)
2140 return self._parent.write(self._path, data, flags)
2138
2141
2139 def remove(self, ignoremissing=False):
2142 def remove(self, ignoremissing=False):
2140 return self._parent.remove(self._path)
2143 return self._parent.remove(self._path)
2141
2144
2142 class workingcommitctx(workingctx):
2145 class workingcommitctx(workingctx):
2143 """A workingcommitctx object makes access to data related to
2146 """A workingcommitctx object makes access to data related to
2144 the revision being committed convenient.
2147 the revision being committed convenient.
2145
2148
2146 This hides changes in the working directory, if they aren't
2149 This hides changes in the working directory, if they aren't
2147 committed in this context.
2150 committed in this context.
2148 """
2151 """
2149 def __init__(self, repo, changes,
2152 def __init__(self, repo, changes,
2150 text="", user=None, date=None, extra=None):
2153 text="", user=None, date=None, extra=None):
2151 super(workingctx, self).__init__(repo, text, user, date, extra,
2154 super(workingctx, self).__init__(repo, text, user, date, extra,
2152 changes)
2155 changes)
2153
2156
2154 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2157 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2155 """Return matched files only in ``self._status``
2158 """Return matched files only in ``self._status``
2156
2159
2157 Uncommitted files appear "clean" via this context, even if
2160 Uncommitted files appear "clean" via this context, even if
2158 they aren't actually so in the working directory.
2161 they aren't actually so in the working directory.
2159 """
2162 """
2160 if clean:
2163 if clean:
2161 clean = [f for f in self._manifest if f not in self._changedset]
2164 clean = [f for f in self._manifest if f not in self._changedset]
2162 else:
2165 else:
2163 clean = []
2166 clean = []
2164 return scmutil.status([f for f in self._status.modified if match(f)],
2167 return scmutil.status([f for f in self._status.modified if match(f)],
2165 [f for f in self._status.added if match(f)],
2168 [f for f in self._status.added if match(f)],
2166 [f for f in self._status.removed if match(f)],
2169 [f for f in self._status.removed if match(f)],
2167 [], [], [], clean)
2170 [], [], [], clean)
2168
2171
2169 @propertycache
2172 @propertycache
2170 def _changedset(self):
2173 def _changedset(self):
2171 """Return the set of files changed in this context
2174 """Return the set of files changed in this context
2172 """
2175 """
2173 changed = set(self._status.modified)
2176 changed = set(self._status.modified)
2174 changed.update(self._status.added)
2177 changed.update(self._status.added)
2175 changed.update(self._status.removed)
2178 changed.update(self._status.removed)
2176 return changed
2179 return changed
2177
2180
2178 def makecachingfilectxfn(func):
2181 def makecachingfilectxfn(func):
2179 """Create a filectxfn that caches based on the path.
2182 """Create a filectxfn that caches based on the path.
2180
2183
2181 We can't use util.cachefunc because it uses all arguments as the cache
2184 We can't use util.cachefunc because it uses all arguments as the cache
2182 key and this creates a cycle since the arguments include the repo and
2185 key and this creates a cycle since the arguments include the repo and
2183 memctx.
2186 memctx.
2184 """
2187 """
2185 cache = {}
2188 cache = {}
2186
2189
2187 def getfilectx(repo, memctx, path):
2190 def getfilectx(repo, memctx, path):
2188 if path not in cache:
2191 if path not in cache:
2189 cache[path] = func(repo, memctx, path)
2192 cache[path] = func(repo, memctx, path)
2190 return cache[path]
2193 return cache[path]
2191
2194
2192 return getfilectx
2195 return getfilectx
2193
2196
2194 def memfilefromctx(ctx):
2197 def memfilefromctx(ctx):
2195 """Given a context return a memfilectx for ctx[path]
2198 """Given a context return a memfilectx for ctx[path]
2196
2199
2197 This is a convenience method for building a memctx based on another
2200 This is a convenience method for building a memctx based on another
2198 context.
2201 context.
2199 """
2202 """
2200 def getfilectx(repo, memctx, path):
2203 def getfilectx(repo, memctx, path):
2201 fctx = ctx[path]
2204 fctx = ctx[path]
2202 # this is weird but apparently we only keep track of one parent
2205 # this is weird but apparently we only keep track of one parent
2203 # (why not only store that instead of a tuple?)
2206 # (why not only store that instead of a tuple?)
2204 copied = fctx.renamed()
2207 copied = fctx.renamed()
2205 if copied:
2208 if copied:
2206 copied = copied[0]
2209 copied = copied[0]
2207 return memfilectx(repo, path, fctx.data(),
2210 return memfilectx(repo, path, fctx.data(),
2208 islink=fctx.islink(), isexec=fctx.isexec(),
2211 islink=fctx.islink(), isexec=fctx.isexec(),
2209 copied=copied, memctx=memctx)
2212 copied=copied, memctx=memctx)
2210
2213
2211 return getfilectx
2214 return getfilectx
2212
2215
2213 def memfilefrompatch(patchstore):
2216 def memfilefrompatch(patchstore):
2214 """Given a patch (e.g. patchstore object) return a memfilectx
2217 """Given a patch (e.g. patchstore object) return a memfilectx
2215
2218
2216 This is a convenience method for building a memctx based on a patchstore.
2219 This is a convenience method for building a memctx based on a patchstore.
2217 """
2220 """
2218 def getfilectx(repo, memctx, path):
2221 def getfilectx(repo, memctx, path):
2219 data, mode, copied = patchstore.getfile(path)
2222 data, mode, copied = patchstore.getfile(path)
2220 if data is None:
2223 if data is None:
2221 return None
2224 return None
2222 islink, isexec = mode
2225 islink, isexec = mode
2223 return memfilectx(repo, path, data, islink=islink,
2226 return memfilectx(repo, path, data, islink=islink,
2224 isexec=isexec, copied=copied,
2227 isexec=isexec, copied=copied,
2225 memctx=memctx)
2228 memctx=memctx)
2226
2229
2227 return getfilectx
2230 return getfilectx
2228
2231
2229 class memctx(committablectx):
2232 class memctx(committablectx):
2230 """Use memctx to perform in-memory commits via localrepo.commitctx().
2233 """Use memctx to perform in-memory commits via localrepo.commitctx().
2231
2234
2232 Revision information is supplied at initialization time while
2235 Revision information is supplied at initialization time while
2233 related files data and is made available through a callback
2236 related files data and is made available through a callback
2234 mechanism. 'repo' is the current localrepo, 'parents' is a
2237 mechanism. 'repo' is the current localrepo, 'parents' is a
2235 sequence of two parent revisions identifiers (pass None for every
2238 sequence of two parent revisions identifiers (pass None for every
2236 missing parent), 'text' is the commit message and 'files' lists
2239 missing parent), 'text' is the commit message and 'files' lists
2237 names of files touched by the revision (normalized and relative to
2240 names of files touched by the revision (normalized and relative to
2238 repository root).
2241 repository root).
2239
2242
2240 filectxfn(repo, memctx, path) is a callable receiving the
2243 filectxfn(repo, memctx, path) is a callable receiving the
2241 repository, the current memctx object and the normalized path of
2244 repository, the current memctx object and the normalized path of
2242 requested file, relative to repository root. It is fired by the
2245 requested file, relative to repository root. It is fired by the
2243 commit function for every file in 'files', but calls order is
2246 commit function for every file in 'files', but calls order is
2244 undefined. If the file is available in the revision being
2247 undefined. If the file is available in the revision being
2245 committed (updated or added), filectxfn returns a memfilectx
2248 committed (updated or added), filectxfn returns a memfilectx
2246 object. If the file was removed, filectxfn return None for recent
2249 object. If the file was removed, filectxfn return None for recent
2247 Mercurial. Moved files are represented by marking the source file
2250 Mercurial. Moved files are represented by marking the source file
2248 removed and the new file added with copy information (see
2251 removed and the new file added with copy information (see
2249 memfilectx).
2252 memfilectx).
2250
2253
2251 user receives the committer name and defaults to current
2254 user receives the committer name and defaults to current
2252 repository username, date is the commit date in any format
2255 repository username, date is the commit date in any format
2253 supported by util.parsedate() and defaults to current date, extra
2256 supported by util.parsedate() and defaults to current date, extra
2254 is a dictionary of metadata or is left empty.
2257 is a dictionary of metadata or is left empty.
2255 """
2258 """
2256
2259
2257 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2260 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2258 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2261 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2259 # this field to determine what to do in filectxfn.
2262 # this field to determine what to do in filectxfn.
2260 _returnnoneformissingfiles = True
2263 _returnnoneformissingfiles = True
2261
2264
2262 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2265 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2263 date=None, extra=None, branch=None, editor=False):
2266 date=None, extra=None, branch=None, editor=False):
2264 super(memctx, self).__init__(repo, text, user, date, extra)
2267 super(memctx, self).__init__(repo, text, user, date, extra)
2265 self._rev = None
2268 self._rev = None
2266 self._node = None
2269 self._node = None
2267 parents = [(p or nullid) for p in parents]
2270 parents = [(p or nullid) for p in parents]
2268 p1, p2 = parents
2271 p1, p2 = parents
2269 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
2272 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
2270 files = sorted(set(files))
2273 files = sorted(set(files))
2271 self._files = files
2274 self._files = files
2272 if branch is not None:
2275 if branch is not None:
2273 self._extra['branch'] = encoding.fromlocal(branch)
2276 self._extra['branch'] = encoding.fromlocal(branch)
2274 self.substate = {}
2277 self.substate = {}
2275
2278
2276 if isinstance(filectxfn, patch.filestore):
2279 if isinstance(filectxfn, patch.filestore):
2277 filectxfn = memfilefrompatch(filectxfn)
2280 filectxfn = memfilefrompatch(filectxfn)
2278 elif not callable(filectxfn):
2281 elif not callable(filectxfn):
2279 # if store is not callable, wrap it in a function
2282 # if store is not callable, wrap it in a function
2280 filectxfn = memfilefromctx(filectxfn)
2283 filectxfn = memfilefromctx(filectxfn)
2281
2284
2282 # memoizing increases performance for e.g. vcs convert scenarios.
2285 # memoizing increases performance for e.g. vcs convert scenarios.
2283 self._filectxfn = makecachingfilectxfn(filectxfn)
2286 self._filectxfn = makecachingfilectxfn(filectxfn)
2284
2287
2285 if editor:
2288 if editor:
2286 self._text = editor(self._repo, self, [])
2289 self._text = editor(self._repo, self, [])
2287 self._repo.savecommitmessage(self._text)
2290 self._repo.savecommitmessage(self._text)
2288
2291
2289 def filectx(self, path, filelog=None):
2292 def filectx(self, path, filelog=None):
2290 """get a file context from the working directory
2293 """get a file context from the working directory
2291
2294
2292 Returns None if file doesn't exist and should be removed."""
2295 Returns None if file doesn't exist and should be removed."""
2293 return self._filectxfn(self._repo, self, path)
2296 return self._filectxfn(self._repo, self, path)
2294
2297
2295 def commit(self):
2298 def commit(self):
2296 """commit context to the repo"""
2299 """commit context to the repo"""
2297 return self._repo.commitctx(self)
2300 return self._repo.commitctx(self)
2298
2301
2299 @propertycache
2302 @propertycache
2300 def _manifest(self):
2303 def _manifest(self):
2301 """generate a manifest based on the return values of filectxfn"""
2304 """generate a manifest based on the return values of filectxfn"""
2302
2305
2303 # keep this simple for now; just worry about p1
2306 # keep this simple for now; just worry about p1
2304 pctx = self._parents[0]
2307 pctx = self._parents[0]
2305 man = pctx.manifest().copy()
2308 man = pctx.manifest().copy()
2306
2309
2307 for f in self._status.modified:
2310 for f in self._status.modified:
2308 p1node = nullid
2311 p1node = nullid
2309 p2node = nullid
2312 p2node = nullid
2310 p = pctx[f].parents() # if file isn't in pctx, check p2?
2313 p = pctx[f].parents() # if file isn't in pctx, check p2?
2311 if len(p) > 0:
2314 if len(p) > 0:
2312 p1node = p[0].filenode()
2315 p1node = p[0].filenode()
2313 if len(p) > 1:
2316 if len(p) > 1:
2314 p2node = p[1].filenode()
2317 p2node = p[1].filenode()
2315 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2318 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2316
2319
2317 for f in self._status.added:
2320 for f in self._status.added:
2318 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2321 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2319
2322
2320 for f in self._status.removed:
2323 for f in self._status.removed:
2321 if f in man:
2324 if f in man:
2322 del man[f]
2325 del man[f]
2323
2326
2324 return man
2327 return man
2325
2328
2326 @propertycache
2329 @propertycache
2327 def _status(self):
2330 def _status(self):
2328 """Calculate exact status from ``files`` specified at construction
2331 """Calculate exact status from ``files`` specified at construction
2329 """
2332 """
2330 man1 = self.p1().manifest()
2333 man1 = self.p1().manifest()
2331 p2 = self._parents[1]
2334 p2 = self._parents[1]
2332 # "1 < len(self._parents)" can't be used for checking
2335 # "1 < len(self._parents)" can't be used for checking
2333 # existence of the 2nd parent, because "memctx._parents" is
2336 # existence of the 2nd parent, because "memctx._parents" is
2334 # explicitly initialized by the list, of which length is 2.
2337 # explicitly initialized by the list, of which length is 2.
2335 if p2.node() != nullid:
2338 if p2.node() != nullid:
2336 man2 = p2.manifest()
2339 man2 = p2.manifest()
2337 managing = lambda f: f in man1 or f in man2
2340 managing = lambda f: f in man1 or f in man2
2338 else:
2341 else:
2339 managing = lambda f: f in man1
2342 managing = lambda f: f in man1
2340
2343
2341 modified, added, removed = [], [], []
2344 modified, added, removed = [], [], []
2342 for f in self._files:
2345 for f in self._files:
2343 if not managing(f):
2346 if not managing(f):
2344 added.append(f)
2347 added.append(f)
2345 elif self[f]:
2348 elif self[f]:
2346 modified.append(f)
2349 modified.append(f)
2347 else:
2350 else:
2348 removed.append(f)
2351 removed.append(f)
2349
2352
2350 return scmutil.status(modified, added, removed, [], [], [], [])
2353 return scmutil.status(modified, added, removed, [], [], [], [])
2351
2354
2352 class memfilectx(committablefilectx):
2355 class memfilectx(committablefilectx):
2353 """memfilectx represents an in-memory file to commit.
2356 """memfilectx represents an in-memory file to commit.
2354
2357
2355 See memctx and committablefilectx for more details.
2358 See memctx and committablefilectx for more details.
2356 """
2359 """
2357 def __init__(self, repo, path, data, islink=False,
2360 def __init__(self, repo, path, data, islink=False,
2358 isexec=False, copied=None, memctx=None):
2361 isexec=False, copied=None, memctx=None):
2359 """
2362 """
2360 path is the normalized file path relative to repository root.
2363 path is the normalized file path relative to repository root.
2361 data is the file content as a string.
2364 data is the file content as a string.
2362 islink is True if the file is a symbolic link.
2365 islink is True if the file is a symbolic link.
2363 isexec is True if the file is executable.
2366 isexec is True if the file is executable.
2364 copied is the source file path if current file was copied in the
2367 copied is the source file path if current file was copied in the
2365 revision being committed, or None."""
2368 revision being committed, or None."""
2366 super(memfilectx, self).__init__(repo, path, None, memctx)
2369 super(memfilectx, self).__init__(repo, path, None, memctx)
2367 self._data = data
2370 self._data = data
2368 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2371 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2369 self._copied = None
2372 self._copied = None
2370 if copied:
2373 if copied:
2371 self._copied = (copied, nullid)
2374 self._copied = (copied, nullid)
2372
2375
2373 def data(self):
2376 def data(self):
2374 return self._data
2377 return self._data
2375
2378
2376 def remove(self, ignoremissing=False):
2379 def remove(self, ignoremissing=False):
2377 """wraps unlink for a repo's working directory"""
2380 """wraps unlink for a repo's working directory"""
2378 # need to figure out what to do here
2381 # need to figure out what to do here
2379 del self._changectx[self._path]
2382 del self._changectx[self._path]
2380
2383
2381 def write(self, data, flags):
2384 def write(self, data, flags):
2382 """wraps repo.wwrite"""
2385 """wraps repo.wwrite"""
2383 self._data = data
2386 self._data = data
2384
2387
2385 class overlayfilectx(committablefilectx):
2388 class overlayfilectx(committablefilectx):
2386 """Like memfilectx but take an original filectx and optional parameters to
2389 """Like memfilectx but take an original filectx and optional parameters to
2387 override parts of it. This is useful when fctx.data() is expensive (i.e.
2390 override parts of it. This is useful when fctx.data() is expensive (i.e.
2388 flag processor is expensive) and raw data, flags, and filenode could be
2391 flag processor is expensive) and raw data, flags, and filenode could be
2389 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2392 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2390 """
2393 """
2391
2394
2392 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2395 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2393 copied=None, ctx=None):
2396 copied=None, ctx=None):
2394 """originalfctx: filecontext to duplicate
2397 """originalfctx: filecontext to duplicate
2395
2398
2396 datafunc: None or a function to override data (file content). It is a
2399 datafunc: None or a function to override data (file content). It is a
2397 function to be lazy. path, flags, copied, ctx: None or overridden value
2400 function to be lazy. path, flags, copied, ctx: None or overridden value
2398
2401
2399 copied could be (path, rev), or False. copied could also be just path,
2402 copied could be (path, rev), or False. copied could also be just path,
2400 and will be converted to (path, nullid). This simplifies some callers.
2403 and will be converted to (path, nullid). This simplifies some callers.
2401 """
2404 """
2402
2405
2403 if path is None:
2406 if path is None:
2404 path = originalfctx.path()
2407 path = originalfctx.path()
2405 if ctx is None:
2408 if ctx is None:
2406 ctx = originalfctx.changectx()
2409 ctx = originalfctx.changectx()
2407 ctxmatch = lambda: True
2410 ctxmatch = lambda: True
2408 else:
2411 else:
2409 ctxmatch = lambda: ctx == originalfctx.changectx()
2412 ctxmatch = lambda: ctx == originalfctx.changectx()
2410
2413
2411 repo = originalfctx.repo()
2414 repo = originalfctx.repo()
2412 flog = originalfctx.filelog()
2415 flog = originalfctx.filelog()
2413 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2416 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2414
2417
2415 if copied is None:
2418 if copied is None:
2416 copied = originalfctx.renamed()
2419 copied = originalfctx.renamed()
2417 copiedmatch = lambda: True
2420 copiedmatch = lambda: True
2418 else:
2421 else:
2419 if copied and not isinstance(copied, tuple):
2422 if copied and not isinstance(copied, tuple):
2420 # repo._filecommit will recalculate copyrev so nullid is okay
2423 # repo._filecommit will recalculate copyrev so nullid is okay
2421 copied = (copied, nullid)
2424 copied = (copied, nullid)
2422 copiedmatch = lambda: copied == originalfctx.renamed()
2425 copiedmatch = lambda: copied == originalfctx.renamed()
2423
2426
2424 # When data, copied (could affect data), ctx (could affect filelog
2427 # When data, copied (could affect data), ctx (could affect filelog
2425 # parents) are not overridden, rawdata, rawflags, and filenode may be
2428 # parents) are not overridden, rawdata, rawflags, and filenode may be
2426 # reused (repo._filecommit should double check filelog parents).
2429 # reused (repo._filecommit should double check filelog parents).
2427 #
2430 #
2428 # path, flags are not hashed in filelog (but in manifestlog) so they do
2431 # path, flags are not hashed in filelog (but in manifestlog) so they do
2429 # not affect reusable here.
2432 # not affect reusable here.
2430 #
2433 #
2431 # If ctx or copied is overridden to a same value with originalfctx,
2434 # If ctx or copied is overridden to a same value with originalfctx,
2432 # still consider it's reusable. originalfctx.renamed() may be a bit
2435 # still consider it's reusable. originalfctx.renamed() may be a bit
2433 # expensive so it's not called unless necessary. Assuming datafunc is
2436 # expensive so it's not called unless necessary. Assuming datafunc is
2434 # always expensive, do not call it for this "reusable" test.
2437 # always expensive, do not call it for this "reusable" test.
2435 reusable = datafunc is None and ctxmatch() and copiedmatch()
2438 reusable = datafunc is None and ctxmatch() and copiedmatch()
2436
2439
2437 if datafunc is None:
2440 if datafunc is None:
2438 datafunc = originalfctx.data
2441 datafunc = originalfctx.data
2439 if flags is None:
2442 if flags is None:
2440 flags = originalfctx.flags()
2443 flags = originalfctx.flags()
2441
2444
2442 self._datafunc = datafunc
2445 self._datafunc = datafunc
2443 self._flags = flags
2446 self._flags = flags
2444 self._copied = copied
2447 self._copied = copied
2445
2448
2446 if reusable:
2449 if reusable:
2447 # copy extra fields from originalfctx
2450 # copy extra fields from originalfctx
2448 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2451 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2449 for attr_ in attrs:
2452 for attr_ in attrs:
2450 if util.safehasattr(originalfctx, attr_):
2453 if util.safehasattr(originalfctx, attr_):
2451 setattr(self, attr_, getattr(originalfctx, attr_))
2454 setattr(self, attr_, getattr(originalfctx, attr_))
2452
2455
2453 def data(self):
2456 def data(self):
2454 return self._datafunc()
2457 return self._datafunc()
2455
2458
2456 class metadataonlyctx(committablectx):
2459 class metadataonlyctx(committablectx):
2457 """Like memctx but it's reusing the manifest of different commit.
2460 """Like memctx but it's reusing the manifest of different commit.
2458 Intended to be used by lightweight operations that are creating
2461 Intended to be used by lightweight operations that are creating
2459 metadata-only changes.
2462 metadata-only changes.
2460
2463
2461 Revision information is supplied at initialization time. 'repo' is the
2464 Revision information is supplied at initialization time. 'repo' is the
2462 current localrepo, 'ctx' is original revision which manifest we're reuisng
2465 current localrepo, 'ctx' is original revision which manifest we're reuisng
2463 'parents' is a sequence of two parent revisions identifiers (pass None for
2466 'parents' is a sequence of two parent revisions identifiers (pass None for
2464 every missing parent), 'text' is the commit.
2467 every missing parent), 'text' is the commit.
2465
2468
2466 user receives the committer name and defaults to current repository
2469 user receives the committer name and defaults to current repository
2467 username, date is the commit date in any format supported by
2470 username, date is the commit date in any format supported by
2468 util.parsedate() and defaults to current date, extra is a dictionary of
2471 util.parsedate() and defaults to current date, extra is a dictionary of
2469 metadata or is left empty.
2472 metadata or is left empty.
2470 """
2473 """
2471 def __new__(cls, repo, originalctx, *args, **kwargs):
2474 def __new__(cls, repo, originalctx, *args, **kwargs):
2472 return super(metadataonlyctx, cls).__new__(cls, repo)
2475 return super(metadataonlyctx, cls).__new__(cls, repo)
2473
2476
    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        """Recreate ``originalctx``'s commit metadata, reusing its manifest.

        ``parents`` (iterable of nodes/revs, or None to keep the original
        parents), ``text``, ``user``, ``date`` and ``extra`` override the
        corresponding metadata of ``originalctx``.  ``editor``, if provided,
        is invoked to (re)write the commit message.

        Raises RuntimeError when the reused manifest's parents do not match
        the manifests of the (possibly overridden) commit parents.
        """
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        # Reuse the original manifest node verbatim; this ctx changes
        # metadata only, never file contents.
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            # Resolve caller-supplied parent identifiers to change contexts,
            # dropping explicit None placeholders.
            parents = [repo[p] for p in parents if p is not None]
        # Copy before padding so a caller-supplied list isn't mutated.
        parents = parents[:]
        # Pad to exactly two parents (null-filled), as _status relies on.
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): ``p1 != nullid`` compares a changectx against a
        # nullid bytes value — these are never equal, so the guard looks
        # like it always passes; possibly ``p1.node() != nullid`` was
        # intended. Verify against basectx.__eq__ before changing.
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)
2509 def manifestnode(self):
2512 def manifestnode(self):
2510 return self._manifestnode
2513 return self._manifestnode
2511
2514
2512 @property
2515 @property
2513 def _manifestctx(self):
2516 def _manifestctx(self):
2514 return self._repo.manifestlog[self._manifestnode]
2517 return self._repo.manifestlog[self._manifestnode]
2515
2518
2516 def filectx(self, path, filelog=None):
2519 def filectx(self, path, filelog=None):
2517 return self._originalctx.filectx(path, filelog=filelog)
2520 return self._originalctx.filectx(path, filelog=filelog)
2518
2521
2519 def commit(self):
2522 def commit(self):
2520 """commit context to the repo"""
2523 """commit context to the repo"""
2521 return self._repo.commitctx(self)
2524 return self._repo.commitctx(self)
2522
2525
2523 @property
2526 @property
2524 def _manifest(self):
2527 def _manifest(self):
2525 return self._originalctx.manifest()
2528 return self._originalctx.manifest()
2526
2529
2527 @propertycache
2530 @propertycache
2528 def _status(self):
2531 def _status(self):
2529 """Calculate exact status from ``files`` specified in the ``origctx``
2532 """Calculate exact status from ``files`` specified in the ``origctx``
2530 and parents manifests.
2533 and parents manifests.
2531 """
2534 """
2532 man1 = self.p1().manifest()
2535 man1 = self.p1().manifest()
2533 p2 = self._parents[1]
2536 p2 = self._parents[1]
2534 # "1 < len(self._parents)" can't be used for checking
2537 # "1 < len(self._parents)" can't be used for checking
2535 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2538 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2536 # explicitly initialized by the list, of which length is 2.
2539 # explicitly initialized by the list, of which length is 2.
2537 if p2.node() != nullid:
2540 if p2.node() != nullid:
2538 man2 = p2.manifest()
2541 man2 = p2.manifest()
2539 managing = lambda f: f in man1 or f in man2
2542 managing = lambda f: f in man1 or f in man2
2540 else:
2543 else:
2541 managing = lambda f: f in man1
2544 managing = lambda f: f in man1
2542
2545
2543 modified, added, removed = [], [], []
2546 modified, added, removed = [], [], []
2544 for f in self._files:
2547 for f in self._files:
2545 if not managing(f):
2548 if not managing(f):
2546 added.append(f)
2549 added.append(f)
2547 elif f in self:
2550 elif f in self:
2548 modified.append(f)
2551 modified.append(f)
2549 else:
2552 else:
2550 removed.append(f)
2553 removed.append(f)
2551
2554
2552 return scmutil.status(modified, added, removed, [], [], [], [])
2555 return scmutil.status(modified, added, removed, [], [], [], [])
2553
2556
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        if isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values as cmp.
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        """Return the on-disk path this context was created with."""
        return self._path

    def flags(self):
        """Arbitrary files carry no exec/symlink flags."""
        return ''

    def data(self):
        """Return the file's raw bytes (read via util.readfile)."""
        return util.readfile(self._path)

    def decodeddata(self):
        """Return the file's bytes without repo filter decoding."""
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        """Delete the file from disk."""
        util.unlink(self._path)

    def write(self, data, flags):
        """Write raw ``data`` to the file; ``flags`` must be empty."""
        assert not flags
        # Open in binary mode: data() and decodeddata() read the file as
        # raw bytes, so writes must not apply platform newline translation
        # (text mode "w" would corrupt "\n" to "\r\n" on Windows and break
        # cmp()/merge round-trips).
        with open(self._path, "wb") as f:
            f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now