##// END OF EJS Templates
context: rename local 'attr' to 'attr_'...
Siddharth Agarwal -
r34432:52e93106 default
parent child Browse files
Show More
@@ -1,2557 +1,2557 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 wdirid,
24 wdirid,
25 wdirnodes,
25 wdirnodes,
26 wdirrev,
26 wdirrev,
27 )
27 )
28 from . import (
28 from . import (
29 encoding,
29 encoding,
30 error,
30 error,
31 fileset,
31 fileset,
32 match as matchmod,
32 match as matchmod,
33 mdiff,
33 mdiff,
34 obsolete as obsmod,
34 obsolete as obsmod,
35 patch,
35 patch,
36 pathutil,
36 pathutil,
37 phases,
37 phases,
38 pycompat,
38 pycompat,
39 repoview,
39 repoview,
40 revlog,
40 revlog,
41 scmutil,
41 scmutil,
42 sparse,
42 sparse,
43 subrepo,
43 subrepo,
44 util,
44 util,
45 )
45 )
46
46
47 propertycache = util.propertycache
47 propertycache = util.propertycache
48
48
49 nonascii = re.compile(r'[^\x21-\x7f]').search
49 nonascii = re.compile(r'[^\x21-\x7f]').search
50
50
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # Passing an existing context through is a no-op: return the
        # same object instead of wrapping it again.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        # Defaults; subclasses overwrite _rev/_node in their __init__.
        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __bytes__(self):
        # Short (12-hex-digit) form of the node id.
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts are equal only when they are the same subclass AND
        # point at the same revision number.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # Membership means "file tracked in this revision's manifest".
        return key in self._manifest

    def __getitem__(self, key):
        # Indexing a context by path yields a file context.
        return self.filectx(key)

    def __iter__(self):
        # Iterating a context yields tracked file names.
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            # A None value from manifest.diff marks a clean file (only
            # reported when listclean is set).
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # Parsed .hgsubstate for this revision, computed lazily.
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        # The recorded revision of the subrepo at 'subpath'.
        return self.substate[subpath][1]

    # Simple accessors; kept one after another without blank lines,
    # matching the file's existing style for trivial getters.
    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        # Anything above 'public' (draft, secret) may still be rewritten.
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        # Deprecated alias for orphan(); scheduled for removal after 4.4.
        msg = ("'context.unstable' is deprecated, "
               "use 'context.orphan'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.orphan()

    def orphan(self):
        """True if the changeset is not obsolete but its ancestors are"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def bumped(self):
        # Deprecated alias for phasedivergent(); removal after 4.4.
        msg = ("'context.bumped' is deprecated, "
               "use 'context.phasedivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.phasedivergent()

    def phasedivergent(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def divergent(self):
        # Deprecated alias for contentdivergent(); removal after 4.4.
        msg = ("'context.divergent' is deprecated, "
               "use 'context.contentdivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.contentdivergent()

    def contentdivergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def troubled(self):
        # Deprecated alias for isunstable(); removal after 4.4.
        msg = ("'context.troubled' is deprecated, "
               "use 'context.isunstable'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.isunstable()

    def isunstable(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def troubles(self):
        """Keep the old version around in order to avoid breaking extensions
        about different return values.
        """
        msg = ("'context.troubles' is deprecated, "
               "use 'context.instabilities'")
        self._repo.ui.deprecwarn(msg, '4.4')

        # NB: returns the old (pre-rename) trouble names, unlike
        # instabilities() below.
        troubles = []
        if self.orphan():
            troubles.append('orphan')
        if self.phasedivergent():
            troubles.append('bumped')
        if self.contentdivergent():
            troubles.append('divergent')
        return troubles

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # Second parent, or the null context when this is not a merge.
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        """Return (filenode, flags) for 'path', raising
        ManifestLookupError when the file is not in the manifest."""
        # Prefer an already-materialized manifest if one is cached.
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        # Otherwise try the cheaper manifest delta before falling back
        # to a full manifest read.
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # Missing files are reported as having no flags rather than
        # raising, to keep template/diff code paths simple.
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """Return a matcher built against this context's repo."""
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
425
425
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    filtername = repo.filtername
    # The 'visible*' repoview filters hide obsolete changesets, so the
    # revision is recoverable with --hidden; say so in the hint.
    if filtername.startswith('visible'):
        return error.FilteredRepoLookupError(
            _("hidden revision '%s'") % changeid,
            hint=_('use --hidden to access hidden revisions'))
    # Any other filter gets the generic "not in subset" message.
    return error.FilteredRepoLookupError(
        _("filtered revision '%s' (not in '%s' subset)")
        % (changeid, filtername))
438
438
439 class changectx(basectx):
439 class changectx(basectx):
440 """A changecontext object makes access to data related to a particular
440 """A changecontext object makes access to data related to a particular
441 changeset convenient. It represents a read-only context already present in
441 changeset convenient. It represents a read-only context already present in
442 the repo."""
442 the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        # The empty changeid means the working directory's first parent.
        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            # A plain int is taken as a revision number directly.
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            # Python 2 only: normalize 'long' to a string and fall
            # through to the string-based lookups below.
            if not pycompat.ispy3 and isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            # A 20-byte string is tried as a binary node id.
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            # Try the string as a decimal revision number (possibly
            # negative, counting from the end).
            try:
                r = int(changeid)
                # Reject forms like '010' or '1 ' that int() accepts
                # but are not canonical revision spellings.
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            # A 40-character string is tried as a full hex node id.
            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            # Last resort: unambiguous hex prefix match.
            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # Hex-encode binary-looking ids so the error message
                # below is printable.
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            # Filtered-access errors from any strategy above get a
            # dedicated, more helpful exception.
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)
550
550
551 def __hash__(self):
551 def __hash__(self):
552 try:
552 try:
553 return hash(self._rev)
553 return hash(self._rev)
554 except AttributeError:
554 except AttributeError:
555 return id(self)
555 return id(self)
556
556
557 def __nonzero__(self):
557 def __nonzero__(self):
558 return self._rev != nullrev
558 return self._rev != nullrev
559
559
560 __bool__ = __nonzero__
560 __bool__ = __nonzero__
561
561
    @propertycache
    def _changeset(self):
        # Parsed changelog entry for this revision (provides .user, .date,
        # .files, .description, .extra, .manifest).
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        # Fully-read manifest for this changeset.
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        # Plain property (not propertycache): the manifestlog performs its
        # own caching, so re-resolving here is cheap.
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        # Delta form of the manifest as returned by readdelta().
        return self._manifestctx.readdelta()
577
577
    @propertycache
    def _parents(self):
        # Parent changectxs; a one-element list when the second parent is
        # the null revision (the common, non-merge case).
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]
585
585
    def changeset(self):
        """Return the raw changeset data as a 6-tuple:
        (manifest, user, date, files, description, extra)."""
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        # Node id of the manifest recorded by this changeset.
        return self._changeset.manifest
598
598
    # Thin accessors over the parsed changelog entry and repo-level state.
    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        # Branch names are stored UTF-8 in extra; convert to local encoding.
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        # A 'close' key in extra marks a branch-closing changeset.
        return 'close' in self._changeset.extra
    def extra(self):
        return self._changeset.extra
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        # True when this revision is filtered out of the 'visible' view.
        return self._rev in repoview.filterrevs(self._repo, 'visible')
621
621
622 def children(self):
622 def children(self):
623 """return contexts for each child changeset"""
623 """return contexts for each child changeset"""
624 c = self._repo.changelog.children(self._node)
624 c = self._repo.changelog.children(self._node)
625 return [changectx(self._repo, x) for x in c]
625 return [changectx(self._repo, x) for x in c]
626
626
627 def ancestors(self):
627 def ancestors(self):
628 for a in self._repo.changelog.ancestors([self._rev]):
628 for a in self._repo.changelog.ancestors([self._rev]):
629 yield changectx(self._repo, a)
629 yield changectx(self._repo, a)
630
630
631 def descendants(self):
631 def descendants(self):
632 for d in self._repo.changelog.descendants([self._rev]):
632 for d in self._repo.changelog.descendants([self._rev]):
633 yield changectx(self._repo, d)
633 yield changectx(self._repo, d)
634
634
635 def filectx(self, path, fileid=None, filelog=None):
635 def filectx(self, path, fileid=None, filelog=None):
636 """get a file context from this changeset"""
636 """get a file context from this changeset"""
637 if fileid is None:
637 if fileid is None:
638 fileid = self.filenode(path)
638 fileid = self.filenode(path)
639 return filectx(self._repo, path, fileid=fileid,
639 return filectx(self._repo, path, fileid=fileid,
640 changectx=self, filelog=filelog)
640 changectx=self, filelog=filelog)
641
641
    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            # a workingctx has no node of its own; use its first parent
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    # skip configured revisions that don't exist here
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched: fall back to revlog choice
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)
677
677
    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)
681
681
    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)
696
696
    def matches(self, match):
        # For a committed changeset, matching is just a manifest walk.
        return self.walk(match)
699
699
700 class basefilectx(object):
700 class basefilectx(object):
701 """A filecontext object represents the common logic for its children:
701 """A filecontext object represents the common logic for its children:
702 filectx: read-only access to a filerevision that is already present
702 filectx: read-only access to a filerevision that is already present
703 in the repo,
703 in the repo,
704 workingfilectx: a filecontext that represents files from the working
704 workingfilectx: a filecontext that represents files from the working
705 directory,
705 directory,
706 memfilectx: a filecontext that represents files in-memory,
706 memfilectx: a filecontext that represents files in-memory,
707 overlayfilectx: duplicate another filecontext with some fields overridden.
707 overlayfilectx: duplicate another filecontext with some fields overridden.
708 """
708 """
    @propertycache
    def _filelog(self):
        # Filelog (per-file revlog) holding this file's history.
        return self._repo.file(self._path)
712
712
    @propertycache
    def _changeid(self):
        # Changelog revision this filectx is bound to, resolved lazily from
        # whichever piece of context the constructor supplied.
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            # last resort: the raw linkrev (may be shadowed; see introrev)
            return self._filelog.linkrev(self._filerev)
725
725
    @propertycache
    def _filenode(self):
        # Node id of this file revision; resolved from an explicit fileid
        # when one was given, otherwise via the owning changeset's manifest.
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)
732
732
    @propertycache
    def _filerev(self):
        # Revision number of this file revision within its filelog.
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        # Repo-relative path; identical to _path for this base class.
        return self._path
740
740
741 def __nonzero__(self):
741 def __nonzero__(self):
742 try:
742 try:
743 self._filenode
743 self._filenode
744 return True
744 return True
745 except error.LookupError:
745 except error.LookupError:
746 # file is missing
746 # file is missing
747 return False
747 return False
748
748
749 __bool__ = __nonzero__
749 __bool__ = __nonzero__
750
750
    def __bytes__(self):
        # "path@changeset"; fall back to "path@???" when the changeset
        # cannot be looked up (e.g. missing file revision).
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))
761
761
    def __hash__(self):
        # Hash on (path, filenode), matching __eq__ below; contexts whose
        # node can't be resolved fall back to identity.
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        # Equal iff same concrete type, same path and same file node.
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
777
777
    # Thin accessors; most delegate to the owning changectx or the filelog.
    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        # Changelog revision this filectx is bound to (see _changeid).
        return self._changeid
    def linkrev(self):
        # Raw linkrev from the filelog; may be shadowed (see introrev).
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        # (oldpath, oldnode) tuple when this revision is a copy, else None
        # in subclasses that set _copied accordingly.
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path
826
826
827 def isbinary(self):
827 def isbinary(self):
828 try:
828 try:
829 return util.binary(self.data())
829 return util.binary(self.data())
830 except IOError:
830 except IOError:
831 return False
831 return False
832 def isexec(self):
832 def isexec(self):
833 return 'x' in self.flags()
833 return 'x' in self.flags()
834 def islink(self):
834 def islink(self):
835 return 'l' in self.flags()
835 return 'l' in self.flags()
836
836
    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False
843
843
    # Subclasses with special comparison semantics set this to True so that
    # cmp() below defers to them.
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        # Only read data when sizes could plausibly match; _filenode is None
        # for working-directory contexts whose size is filtered on read.
        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        # sizes differ, so content must differ
        return True
862
862
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
908
908
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        # only adjust when we know which changeset this filectx belongs to
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)
924
924
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
940
940
    def parents(self):
        """Return parent filectxs, following renames when present."""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
960
960
961 def p1(self):
961 def p1(self):
962 return self.parents()[0]
962 return self.parents()[0]
963
963
964 def p2(self):
964 def p2(self):
965 p = self.parents()
965 p = self.parents()
966 if len(p) == 2:
966 if len(p) == 2:
967 return p[1]
967 return p[1]
968 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
968 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
969
969
    def annotate(self, follow=False, linenumber=False, skiprevs=None,
                 diffopts=None):
        '''returns a list of tuples of ((ctx, number), line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed; if linenumber parameter is true, number is
        the line number at the first appearance in the managed file, otherwise,
        number has a fixed value of False.
        '''

        def lines(text):
            # number of lines, counting a trailing fragment without '\n'
            if text.endswith("\n"):
                return text.count("\n")
            return text.count("\n") + int(bool(text))

        if linenumber:
            def decorate(text, rev):
                return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
        else:
            def decorate(text, rev):
                return ([(rev, False)] * lines(text), text)

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = self.filectx(self.filenode(), changeid=introrev)
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if introrev is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([introrev], inclusive=True)
            base._ancestrycontext = ac

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        # 1st DFS pre-calculates pcache and needed
        visit = [base]
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit.pop()
            if f in pcache:
                continue
            pl = parents(f)
            pcache[f] = pl
            for p in pl:
                # reference count: how many children still need p's history
                needed[p] = needed.get(p, 0) + 1
                if p not in pcache:
                    visit.append(p)

        # 2nd DFS does the actual annotate
        visit[:] = [base]
        hist = {}
        while visit:
            f = visit[-1]
            if f in hist:
                visit.pop()
                continue

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
            if ready:
                visit.pop()
                curr = decorate(f.data(), f)
                skipchild = False
                if skiprevs is not None:
                    skipchild = f._changeid in skiprevs
                curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
                                     diffopts)
                for p in pl:
                    # drop parent histories once no remaining child needs them
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

            hist[f] = curr
            del pcache[f]

        return zip(hist[base][0], hist[base][1].splitlines(True))
1081 return zip(hist[base][0], hist[base][1].splitlines(True))
1082
1082
1083 def ancestors(self, followfirst=False):
1083 def ancestors(self, followfirst=False):
1084 visit = {}
1084 visit = {}
1085 c = self
1085 c = self
1086 if followfirst:
1086 if followfirst:
1087 cut = 1
1087 cut = 1
1088 else:
1088 else:
1089 cut = None
1089 cut = None
1090
1090
1091 while True:
1091 while True:
1092 for parent in c.parents()[:cut]:
1092 for parent in c.parents()[:cut]:
1093 visit[(parent.linkrev(), parent.filenode())] = parent
1093 visit[(parent.linkrev(), parent.filenode())] = parent
1094 if not visit:
1094 if not visit:
1095 break
1095 break
1096 c = visit.pop(max(visit))
1096 c = visit.pop(max(visit))
1097 yield c
1097 yield c
1098
1098
1099 def decodeddata(self):
1099 def decodeddata(self):
1100 """Returns `data()` after running repository decoding filters.
1100 """Returns `data()` after running repository decoding filters.
1101
1101
1102 This is often equivalent to how the data would be expressed on disk.
1102 This is often equivalent to how the data would be expressed on disk.
1103 """
1103 """
1104 return self._repo.wwritedata(self.path(), self.data())
1104 return self._repo.wwritedata(self.path(), self.data())
1105
1105
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    See test-annotate.py for unit tests.
    '''
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Reattribute any lines still blamed on the child so that it is
        # never blamed for any line.
        _annotatepairskipchild(pblocks, childfctx, child)
    return child

def _annotatepairskipchild(pblocks, childfctx, child):
    '''Best-effort reattribution of lines still blamed on childfctx.

    Mutates `child` in place. The heuristics are:
    * Work on blocks of changed lines (effectively diff hunks with -U0).
      This could potentially be smarter but works well enough.
    * For a non-matching section, do a best-effort fit. Match lines in
      diff hunks 1:1, dropping lines as necessary.
    * Repeat the last line as a last resort.
    '''
    # Reversing pblocks maintains bias towards p2, matching the '='-block
    # copying done by the caller.
    pblocks.reverse()

    # NOTE(review): xrange is the Python 2 builtin; if this file is ported
    # to py3, these loops need pycompat.xrange (or range) -- confirm the
    # pycompat helper is available at this revision.

    # First, replace as much as possible without repeating the last line.
    remaining = [(parent, []) for parent, _blocks in pblocks]
    for idx, (parent, blocks) in enumerate(pblocks):
        for (a1, a2, b1, b2), _t in blocks:
            if a2 - a1 >= b2 - b1:
                for bk in xrange(b1, b2):
                    if child[0][bk][0] == childfctx:
                        # Map child line bk onto the parent hunk 1:1,
                        # clamping at the hunk's last line.
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = parent[0][ak]
            else:
                # Parent hunk is shorter than the child hunk; defer so the
                # last parent line can be repeated below.
                remaining[idx][1].append((a1, a2, b1, b2))

    # Then, look at anything left, which might involve repeating the last
    # line.
    for parent, blocks in remaining:
        for a1, a2, b1, b2 in blocks:
            for bk in xrange(b1, b2):
                if child[0][bk][0] == childfctx:
                    ak = min(a1 + (bk - b1), a2 - 1)
                    child[0][bk] = parent[0][ak]
1166
1166
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # At least one way of locating the revision must be provided.
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # Attributes not set here are computed lazily (e.g. _changectx via
        # the propertycache below; presumably others in basefilectx --
        # confirm against the base class).
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        # Reuses self._filelog so the new context shares the parsed filelog.
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        # raw=True: return the stored revision data -- presumably without
        # flag processing; confirm against the revlog API.
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file revision's contents.

        Aborts on a censored node unless censor.policy is 'ignore', in
        which case the empty string is returned.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        # Size as recorded by the filelog for this file revision.
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        # If either changeset parent already contains this exact file
        # revision, the rename does not belong to this changeset.
        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1272
1272
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        # Not committed yet, so no revision number or node.
        self._rev = None
        self._node = None
        self._text = text
        # date/user/changes are only set eagerly when provided; otherwise
        # the propertycache'd _date/_user/_status below compute defaults
        # on first access.
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # Render as "<p1>+" to mark uncommitted state.
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # Default status: full repository status vs. the working copy.
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        # devel.default-date allows tests to pin a deterministic date.
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        # All files touched by this pending commit, sorted.
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        return []

    def bookmarks(self):
        # An uncommitted context inherits the bookmarks of its parents.
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # Only consult a manifest that has already been materialized
        # (checking __dict__ avoids triggering a lazy computation just
        # for flags).
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        # Yield direct parents first, then every changelog ancestor.
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
                [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1470
1470
1471 class workingctx(committablectx):
1471 class workingctx(committablectx):
1472 """A workingctx object makes access to data related to
1472 """A workingctx object makes access to data related to
1473 the current working directory convenient.
1473 the current working directory convenient.
1474 date - any valid date string or (unixtime, offset), or None.
1474 date - any valid date string or (unixtime, offset), or None.
1475 user - username string, or None.
1475 user - username string, or None.
1476 extra - a dictionary of extra values, or None.
1476 extra - a dictionary of extra values, or None.
1477 changes - a list of file lists as returned by localrepo.status()
1477 changes - a list of file lists as returned by localrepo.status()
1478 or None to use the repository status.
1478 or None to use the repository status.
1479 """
1479 """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # All construction bookkeeping lives in committablectx; workingctx
        # only specializes behavior.
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1483
1483
1484 def __iter__(self):
1484 def __iter__(self):
1485 d = self._repo.dirstate
1485 d = self._repo.dirstate
1486 for f in d:
1486 for f in d:
1487 if d[f] != 'r':
1487 if d[f] != 'r':
1488 yield f
1488 yield f
1489
1489
1490 def __contains__(self, key):
1490 def __contains__(self, key):
1491 return self._repo.dirstate[key] not in "?r"
1491 return self._repo.dirstate[key] not in "?r"
1492
1492
    def hex(self):
        # The working directory has no committed node; report the module-level
        # working directory id (wdirid) instead. `hex` here resolves to the
        # function imported from .node, not this method.
        return hex(wdirid)
1495
1495
    @propertycache
    def _parents(self):
        # Working directory parents come from the dirstate; drop a null
        # second parent so non-merge states report a single parent.
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]
1502
1502
    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        # Passing workingctx=self lets the file context share this context's
        # state instead of recomputing it.
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)
1507
1507
    def dirty(self, missing=False, merge=True, branch=True):
        """check whether a working directory is modified

        missing - also count files deleted from disk as dirty
        merge   - count an in-progress merge (second parent) as dirty
        branch  - count a branch different from p1's as dirty
        """
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))
1519
1519
    def add(self, list, prefix=""):
        """Schedule the given files for addition in the dirstate.

        Returns the list of files that were rejected (nonexistent or not a
        regular file/symlink). Warnings are emitted for oversized files and
        files already tracked.
        """
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    # Warn, but do not reject, very large files.
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    # Already added, merged, or normal: nothing to do.
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    # Previously removed: resurrect rather than re-add.
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1554
1554
1555 def forget(self, files, prefix=""):
1555 def forget(self, files, prefix=""):
1556 with self._repo.wlock():
1556 with self._repo.wlock():
1557 ds = self._repo.dirstate
1557 ds = self._repo.dirstate
1558 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1558 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1559 rejected = []
1559 rejected = []
1560 for f in files:
1560 for f in files:
1561 if f not in self._repo.dirstate:
1561 if f not in self._repo.dirstate:
1562 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1562 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1563 rejected.append(f)
1563 rejected.append(f)
1564 elif self._repo.dirstate[f] != 'a':
1564 elif self._repo.dirstate[f] != 'a':
1565 self._repo.dirstate.remove(f)
1565 self._repo.dirstate.remove(f)
1566 else:
1566 else:
1567 self._repo.dirstate.drop(f)
1567 self._repo.dirstate.drop(f)
1568 return rejected
1568 return rejected
1569
1569
    def undelete(self, list):
        """Restore each removed file in `list` to the working directory.

        The file content and flags are taken from the first parent that
        contains the file (p1 preferred, else p2), and the dirstate entry
        is reset to normal.  Files not in state 'r' only produce a warning.
        NOTE: the parameter is named `list`, shadowing the builtin; kept
        for API compatibility with existing callers.
        """
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    # Prefer p1's copy of the file; fall back to p2's.
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)
1582
1582
    def copy(self, source, dest):
        """Record that `dest` is a copy of `source` in the dirstate.

        `dest` must already exist in the working directory as a regular
        file or symlink; otherwise a warning is emitted and nothing is
        recorded.  Untracked destinations are added; removed ones are
        resurrected via normallookup before the copy is recorded.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            # Only "file missing" is expected; re-raise anything else.
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    # Untracked destination: start tracking it.
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    # Previously removed: bring it back before recording.
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
1603
1603
1604 def match(self, pats=None, include=None, exclude=None, default='glob',
1604 def match(self, pats=None, include=None, exclude=None, default='glob',
1605 listsubrepos=False, badfn=None):
1605 listsubrepos=False, badfn=None):
1606 r = self._repo
1606 r = self._repo
1607
1607
1608 # Only a case insensitive filesystem needs magic to translate user input
1608 # Only a case insensitive filesystem needs magic to translate user input
1609 # to actual case in the filesystem.
1609 # to actual case in the filesystem.
1610 icasefs = not util.fscasesensitive(r.root)
1610 icasefs = not util.fscasesensitive(r.root)
1611 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1611 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1612 default, auditor=r.auditor, ctx=self,
1612 default, auditor=r.auditor, ctx=self,
1613 listsubrepos=listsubrepos, badfn=badfn,
1613 listsubrepos=listsubrepos, badfn=badfn,
1614 icasefs=icasefs)
1614 icasefs=icasefs)
1615
1615
1616 def flushall(self):
1616 def flushall(self):
1617 pass # For overlayworkingfilectx compatibility.
1617 pass # For overlayworkingfilectx compatibility.
1618
1618
1619 def _filtersuspectsymlink(self, files):
1619 def _filtersuspectsymlink(self, files):
1620 if not files or self._repo.dirstate._checklink:
1620 if not files or self._repo.dirstate._checklink:
1621 return files
1621 return files
1622
1622
1623 # Symlink placeholders may get non-symlink-like contents
1623 # Symlink placeholders may get non-symlink-like contents
1624 # via user error or dereferencing by NFS or Samba servers,
1624 # via user error or dereferencing by NFS or Samba servers,
1625 # so we filter out any placeholders that don't look like a
1625 # so we filter out any placeholders that don't look like a
1626 # symlink
1626 # symlink
1627 sane = []
1627 sane = []
1628 for f in files:
1628 for f in files:
1629 if self.flags(f) == 'l':
1629 if self.flags(f) == 'l':
1630 d = self[f].data()
1630 d = self[f].data()
1631 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1631 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1632 self._repo.ui.debug('ignoring suspect symlink placeholder'
1632 self._repo.ui.debug('ignoring suspect symlink placeholder'
1633 ' "%s"\n' % f)
1633 ' "%s"\n' % f)
1634 continue
1634 continue
1635 sane.append(f)
1635 sane.append(f)
1636 return sane
1636 return sane
1637
1637
    def _checklookup(self, files):
        """Re-examine possibly-clean `files` against the first parent.

        Returns a (modified, deleted, fixup) triple of lists: `modified`
        holds files whose flags or content actually differ from p1,
        `deleted` holds files that became unreadable, and `fixup` holds
        files that compared clean and whose dirstate entry can be refreshed.
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1667
1667
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean

        `fixup` is the list of files `_checklookup` found to be clean; their
        dirstate entries are refreshed to 'normal'.  Registered post-status
        hooks are invoked as well.  The dirstate write is best-effort: it is
        skipped if the wlock cannot be taken or if the on-disk dirstate
        changed under us (identity mismatch).
        """
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                # Snapshot the dirstate identity before locking so we can
                # detect concurrent modification afterwards.
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                # Couldn't get the wlock without waiting: skip the optional
                # dirstate refresh entirely.
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1707
1707
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.

        Returns a scmutil.status object.  Files the dirstate could not
        classify by metadata alone ("lookup" candidates) are resolved by
        content comparison via _checklookup, and the dirstate is
        opportunistically refreshed for the ones that turn out clean.
        '''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            # Files that compared clean count as clean for the caller.
            s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s
1738
1738
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parent, but uses special node
        identifiers for added and modified files. This is used by manifest
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)
1749
1749
1750 def _buildstatusmanifest(self, status):
1750 def _buildstatusmanifest(self, status):
1751 """Builds a manifest that includes the given status results."""
1751 """Builds a manifest that includes the given status results."""
1752 parents = self.parents()
1752 parents = self.parents()
1753
1753
1754 man = parents[0].manifest().copy()
1754 man = parents[0].manifest().copy()
1755
1755
1756 ff = self._flagfunc
1756 ff = self._flagfunc
1757 for i, l in ((addednodeid, status.added),
1757 for i, l in ((addednodeid, status.added),
1758 (modifiednodeid, status.modified)):
1758 (modifiednodeid, status.modified)):
1759 for f in l:
1759 for f in l:
1760 man[f] = i
1760 man[f] = i
1761 try:
1761 try:
1762 man.setflag(f, ff(f))
1762 man.setflag(f, ff(f))
1763 except OSError:
1763 except OSError:
1764 pass
1764 pass
1765
1765
1766 for f in status.deleted + status.removed:
1766 for f in status.deleted + status.removed:
1767 if f in man:
1767 if f in man:
1768 del man[f]
1768 del man[f]
1769
1769
1770 return man
1770 return man
1771
1771
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # NOTE: the incoming `s` is intentionally discarded; the status is
        # recomputed from the dirstate here.
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            # Not comparing against p1: fall back to the generic
            # manifest-based comparison in the superclass.
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s
1791
1791
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            # Mutates the caller-supplied matcher in place.
            match.bad = bad
        return match
1811
1811
1812 def markcommitted(self, node):
1812 def markcommitted(self, node):
1813 super(workingctx, self).markcommitted(node)
1813 super(workingctx, self).markcommitted(node)
1814
1814
1815 sparse.aftercommit(self._repo, node)
1815 sparse.aftercommit(self._repo, node)
1816
1816
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # Not yet committed, so no changelog entry or file revision exists.
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # An uncommitted file context always "exists".
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            # nullid signals "file absent in this parent".
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # Follow the copy: the first parent is the copy source.
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # Skip parents in which the file does not exist (node == nullid).
        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # An uncommitted file revision cannot have children.
        return []
1863
1863
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        # Read through the working-directory filter pipeline (decode).
        return self._repo.wread(self._path)
    def renamed(self):
        """Return (source path, source filenode) if this file was copied,
        else None."""
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        """Return (mtime, tz offset); falls back to the changectx date if
        the file is gone from disk."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        # Follows symlinks, unlike lexists().
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        # Sanity-check the path (no escaping the repo, no .hg component).
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        # A real directory (not a symlink to one) in the way of a file
        # write is removed if empty.
        if wvfs.isdir(self._path) and not wvfs.islink(self._path):
            wvfs.removedirs(self._path)

    def setflags(self, l, x):
        # l: symlink flag, x: executable flag.
        self._repo.wvfs.setflags(self._path, l, x)
1930
1930
class overlayworkingctx(workingctx):
    """Wraps another mutable context with a write-back cache that can be flushed
    at a later time.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo, wrappedctx):
        super(overlayworkingctx, self).__init__(repo)
        self._repo = repo
        self._wrappedctx = wrappedctx
        self._clean()

    def data(self, path):
        """Return the cached data for `path`, falling back to the wrapped
        context when only flags were cached or the path is not dirty.

        Raises ProgrammingError if the path was deleted in the cache.
        """
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                # BUG FIX: this used self._path, which is never set on this
                # class (it is a changectx, not a filectx), so formatting the
                # error itself raised AttributeError. Use the path argument.
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def flags(self, path):
        """Return the cached flags for `path`, falling back to the wrapped
        context; raises ProgrammingError for cache-deleted paths."""
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                # BUG FIX: was self._path (unset attribute); see data().
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def write(self, path, data, flags=''):
        """Cache a write of `data` with `flags` to `path`."""
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._markdirty(path, exists=True, data=data, date=util.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        # Cache a flags-only change; data stays None so reads fall through.
        self._markdirty(path, exists=True, date=util.makedate(),
                        flags=(l and 'l' or '') + (x and 'x' or ''))

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                    'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']
        return self._wrappedctx[path].exists()

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']
        return self._wrappedctx[path].lexists()

    def size(self, path):
        """Return the size of `path`; raises ProgrammingError for
        cache-deleted paths."""
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                # BUG FIX: was self._path (unset attribute); see data().
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def flushall(self):
        """Replay all cached writes/deletes into the wrapped context, in
        write order, then reset the cache."""
        for path in self._writeorder:
            entry = self._cache[path]
            if entry['exists']:
                self._wrappedctx[path].clearunknown()
                if entry['data'] is not None:
                    if entry['flags'] is None:
                        raise error.ProgrammingError('data set but not flags')
                    self._wrappedctx[path].write(
                        entry['data'],
                        entry['flags'])
                else:
                    # Flags-only change (setflags() path).
                    self._wrappedctx[path].setflags(
                        'l' in entry['flags'],
                        'x' in entry['flags'])
            else:
                # NOTE(review): workingfilectx.remove()'s first parameter is
                # 'ignoremissing', so 'path' lands there (truthy) -- confirm
                # this is the intended best-effort behavior.
                self._wrappedctx[path].remove(path)
        self._clean()

    def isdirty(self, path):
        # Dirty means "has a pending cached change", including deletions.
        return path in self._cache

    def _clean(self):
        self._cache = {}
        self._writeorder = []

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        # Preserve first-write order for flushall(); later writes to the
        # same path overwrite the cache entry but keep the original slot.
        if path not in self._cache:
            self._writeorder.append(path)

        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)
2063
2063
2064 class overlayworkingfilectx(workingfilectx):
2064 class overlayworkingfilectx(workingfilectx):
2065 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2065 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2066 cache, which can be flushed through later by calling ``flush()``."""
2066 cache, which can be flushed through later by calling ``flush()``."""
2067
2067
2068 def __init__(self, repo, path, filelog=None, parent=None):
2068 def __init__(self, repo, path, filelog=None, parent=None):
2069 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2069 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2070 parent)
2070 parent)
2071 self._repo = repo
2071 self._repo = repo
2072 self._parent = parent
2072 self._parent = parent
2073 self._path = path
2073 self._path = path
2074
2074
2075 def ctx(self):
2075 def ctx(self):
2076 return self._parent
2076 return self._parent
2077
2077
2078 def data(self):
2078 def data(self):
2079 return self._parent.data(self._path)
2079 return self._parent.data(self._path)
2080
2080
2081 def date(self):
2081 def date(self):
2082 return self._parent.filedate(self._path)
2082 return self._parent.filedate(self._path)
2083
2083
2084 def exists(self):
2084 def exists(self):
2085 return self.lexists()
2085 return self.lexists()
2086
2086
2087 def lexists(self):
2087 def lexists(self):
2088 return self._parent.exists(self._path)
2088 return self._parent.exists(self._path)
2089
2089
2090 def renamed(self):
2090 def renamed(self):
2091 # Copies are currently tracked in the dirstate as before. Straight copy
2091 # Copies are currently tracked in the dirstate as before. Straight copy
2092 # from workingfilectx.
2092 # from workingfilectx.
2093 rp = self._repo.dirstate.copied(self._path)
2093 rp = self._repo.dirstate.copied(self._path)
2094 if not rp:
2094 if not rp:
2095 return None
2095 return None
2096 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
2096 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
2097
2097
2098 def size(self):
2098 def size(self):
2099 return self._parent.size(self._path)
2099 return self._parent.size(self._path)
2100
2100
2101 def audit(self):
2101 def audit(self):
2102 pass
2102 pass
2103
2103
2104 def flags(self):
2104 def flags(self):
2105 return self._parent.flags(self._path)
2105 return self._parent.flags(self._path)
2106
2106
2107 def setflags(self, islink, isexec):
2107 def setflags(self, islink, isexec):
2108 return self._parent.setflags(self._path, islink, isexec)
2108 return self._parent.setflags(self._path, islink, isexec)
2109
2109
2110 def write(self, data, flags, backgroundclose=False):
2110 def write(self, data, flags, backgroundclose=False):
2111 return self._parent.write(self._path, data, flags)
2111 return self._parent.write(self._path, data, flags)
2112
2112
2113 def remove(self, ignoremissing=False):
2113 def remove(self, ignoremissing=False):
2114 return self._parent.remove(self._path)
2114 return self._parent.remove(self._path)
2115
2115
2116 class workingcommitctx(workingctx):
2116 class workingcommitctx(workingctx):
2117 """A workingcommitctx object makes access to data related to
2117 """A workingcommitctx object makes access to data related to
2118 the revision being committed convenient.
2118 the revision being committed convenient.
2119
2119
2120 This hides changes in the working directory, if they aren't
2120 This hides changes in the working directory, if they aren't
2121 committed in this context.
2121 committed in this context.
2122 """
2122 """
2123 def __init__(self, repo, changes,
2123 def __init__(self, repo, changes,
2124 text="", user=None, date=None, extra=None):
2124 text="", user=None, date=None, extra=None):
2125 super(workingctx, self).__init__(repo, text, user, date, extra,
2125 super(workingctx, self).__init__(repo, text, user, date, extra,
2126 changes)
2126 changes)
2127
2127
2128 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2128 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2129 """Return matched files only in ``self._status``
2129 """Return matched files only in ``self._status``
2130
2130
2131 Uncommitted files appear "clean" via this context, even if
2131 Uncommitted files appear "clean" via this context, even if
2132 they aren't actually so in the working directory.
2132 they aren't actually so in the working directory.
2133 """
2133 """
2134 if clean:
2134 if clean:
2135 clean = [f for f in self._manifest if f not in self._changedset]
2135 clean = [f for f in self._manifest if f not in self._changedset]
2136 else:
2136 else:
2137 clean = []
2137 clean = []
2138 return scmutil.status([f for f in self._status.modified if match(f)],
2138 return scmutil.status([f for f in self._status.modified if match(f)],
2139 [f for f in self._status.added if match(f)],
2139 [f for f in self._status.added if match(f)],
2140 [f for f in self._status.removed if match(f)],
2140 [f for f in self._status.removed if match(f)],
2141 [], [], [], clean)
2141 [], [], [], clean)
2142
2142
2143 @propertycache
2143 @propertycache
2144 def _changedset(self):
2144 def _changedset(self):
2145 """Return the set of files changed in this context
2145 """Return the set of files changed in this context
2146 """
2146 """
2147 changed = set(self._status.modified)
2147 changed = set(self._status.modified)
2148 changed.update(self._status.added)
2148 changed.update(self._status.added)
2149 changed.update(self._status.removed)
2149 changed.update(self._status.removed)
2150 return changed
2150 return changed
2151
2151
2152 def makecachingfilectxfn(func):
2152 def makecachingfilectxfn(func):
2153 """Create a filectxfn that caches based on the path.
2153 """Create a filectxfn that caches based on the path.
2154
2154
2155 We can't use util.cachefunc because it uses all arguments as the cache
2155 We can't use util.cachefunc because it uses all arguments as the cache
2156 key and this creates a cycle since the arguments include the repo and
2156 key and this creates a cycle since the arguments include the repo and
2157 memctx.
2157 memctx.
2158 """
2158 """
2159 cache = {}
2159 cache = {}
2160
2160
2161 def getfilectx(repo, memctx, path):
2161 def getfilectx(repo, memctx, path):
2162 if path not in cache:
2162 if path not in cache:
2163 cache[path] = func(repo, memctx, path)
2163 cache[path] = func(repo, memctx, path)
2164 return cache[path]
2164 return cache[path]
2165
2165
2166 return getfilectx
2166 return getfilectx
2167
2167
2168 def memfilefromctx(ctx):
2168 def memfilefromctx(ctx):
2169 """Given a context return a memfilectx for ctx[path]
2169 """Given a context return a memfilectx for ctx[path]
2170
2170
2171 This is a convenience method for building a memctx based on another
2171 This is a convenience method for building a memctx based on another
2172 context.
2172 context.
2173 """
2173 """
2174 def getfilectx(repo, memctx, path):
2174 def getfilectx(repo, memctx, path):
2175 fctx = ctx[path]
2175 fctx = ctx[path]
2176 # this is weird but apparently we only keep track of one parent
2176 # this is weird but apparently we only keep track of one parent
2177 # (why not only store that instead of a tuple?)
2177 # (why not only store that instead of a tuple?)
2178 copied = fctx.renamed()
2178 copied = fctx.renamed()
2179 if copied:
2179 if copied:
2180 copied = copied[0]
2180 copied = copied[0]
2181 return memfilectx(repo, path, fctx.data(),
2181 return memfilectx(repo, path, fctx.data(),
2182 islink=fctx.islink(), isexec=fctx.isexec(),
2182 islink=fctx.islink(), isexec=fctx.isexec(),
2183 copied=copied, memctx=memctx)
2183 copied=copied, memctx=memctx)
2184
2184
2185 return getfilectx
2185 return getfilectx
2186
2186
2187 def memfilefrompatch(patchstore):
2187 def memfilefrompatch(patchstore):
2188 """Given a patch (e.g. patchstore object) return a memfilectx
2188 """Given a patch (e.g. patchstore object) return a memfilectx
2189
2189
2190 This is a convenience method for building a memctx based on a patchstore.
2190 This is a convenience method for building a memctx based on a patchstore.
2191 """
2191 """
2192 def getfilectx(repo, memctx, path):
2192 def getfilectx(repo, memctx, path):
2193 data, mode, copied = patchstore.getfile(path)
2193 data, mode, copied = patchstore.getfile(path)
2194 if data is None:
2194 if data is None:
2195 return None
2195 return None
2196 islink, isexec = mode
2196 islink, isexec = mode
2197 return memfilectx(repo, path, data, islink=islink,
2197 return memfilectx(repo, path, data, islink=islink,
2198 isexec=isexec, copied=copied,
2198 isexec=isexec, copied=copied,
2199 memctx=memctx)
2199 memctx=memctx)
2200
2200
2201 return getfilectx
2201 return getfilectx
2202
2202
2203 class memctx(committablectx):
2203 class memctx(committablectx):
2204 """Use memctx to perform in-memory commits via localrepo.commitctx().
2204 """Use memctx to perform in-memory commits via localrepo.commitctx().
2205
2205
2206 Revision information is supplied at initialization time while
2206 Revision information is supplied at initialization time while
2207 related files data and is made available through a callback
2207 related files data and is made available through a callback
2208 mechanism. 'repo' is the current localrepo, 'parents' is a
2208 mechanism. 'repo' is the current localrepo, 'parents' is a
2209 sequence of two parent revisions identifiers (pass None for every
2209 sequence of two parent revisions identifiers (pass None for every
2210 missing parent), 'text' is the commit message and 'files' lists
2210 missing parent), 'text' is the commit message and 'files' lists
2211 names of files touched by the revision (normalized and relative to
2211 names of files touched by the revision (normalized and relative to
2212 repository root).
2212 repository root).
2213
2213
2214 filectxfn(repo, memctx, path) is a callable receiving the
2214 filectxfn(repo, memctx, path) is a callable receiving the
2215 repository, the current memctx object and the normalized path of
2215 repository, the current memctx object and the normalized path of
2216 requested file, relative to repository root. It is fired by the
2216 requested file, relative to repository root. It is fired by the
2217 commit function for every file in 'files', but calls order is
2217 commit function for every file in 'files', but calls order is
2218 undefined. If the file is available in the revision being
2218 undefined. If the file is available in the revision being
2219 committed (updated or added), filectxfn returns a memfilectx
2219 committed (updated or added), filectxfn returns a memfilectx
2220 object. If the file was removed, filectxfn return None for recent
2220 object. If the file was removed, filectxfn return None for recent
2221 Mercurial. Moved files are represented by marking the source file
2221 Mercurial. Moved files are represented by marking the source file
2222 removed and the new file added with copy information (see
2222 removed and the new file added with copy information (see
2223 memfilectx).
2223 memfilectx).
2224
2224
2225 user receives the committer name and defaults to current
2225 user receives the committer name and defaults to current
2226 repository username, date is the commit date in any format
2226 repository username, date is the commit date in any format
2227 supported by util.parsedate() and defaults to current date, extra
2227 supported by util.parsedate() and defaults to current date, extra
2228 is a dictionary of metadata or is left empty.
2228 is a dictionary of metadata or is left empty.
2229 """
2229 """
2230
2230
2231 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2231 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2232 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2232 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2233 # this field to determine what to do in filectxfn.
2233 # this field to determine what to do in filectxfn.
2234 _returnnoneformissingfiles = True
2234 _returnnoneformissingfiles = True
2235
2235
2236 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2236 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2237 date=None, extra=None, branch=None, editor=False):
2237 date=None, extra=None, branch=None, editor=False):
2238 super(memctx, self).__init__(repo, text, user, date, extra)
2238 super(memctx, self).__init__(repo, text, user, date, extra)
2239 self._rev = None
2239 self._rev = None
2240 self._node = None
2240 self._node = None
2241 parents = [(p or nullid) for p in parents]
2241 parents = [(p or nullid) for p in parents]
2242 p1, p2 = parents
2242 p1, p2 = parents
2243 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
2243 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
2244 files = sorted(set(files))
2244 files = sorted(set(files))
2245 self._files = files
2245 self._files = files
2246 if branch is not None:
2246 if branch is not None:
2247 self._extra['branch'] = encoding.fromlocal(branch)
2247 self._extra['branch'] = encoding.fromlocal(branch)
2248 self.substate = {}
2248 self.substate = {}
2249
2249
2250 if isinstance(filectxfn, patch.filestore):
2250 if isinstance(filectxfn, patch.filestore):
2251 filectxfn = memfilefrompatch(filectxfn)
2251 filectxfn = memfilefrompatch(filectxfn)
2252 elif not callable(filectxfn):
2252 elif not callable(filectxfn):
2253 # if store is not callable, wrap it in a function
2253 # if store is not callable, wrap it in a function
2254 filectxfn = memfilefromctx(filectxfn)
2254 filectxfn = memfilefromctx(filectxfn)
2255
2255
2256 # memoizing increases performance for e.g. vcs convert scenarios.
2256 # memoizing increases performance for e.g. vcs convert scenarios.
2257 self._filectxfn = makecachingfilectxfn(filectxfn)
2257 self._filectxfn = makecachingfilectxfn(filectxfn)
2258
2258
2259 if editor:
2259 if editor:
2260 self._text = editor(self._repo, self, [])
2260 self._text = editor(self._repo, self, [])
2261 self._repo.savecommitmessage(self._text)
2261 self._repo.savecommitmessage(self._text)
2262
2262
2263 def filectx(self, path, filelog=None):
2263 def filectx(self, path, filelog=None):
2264 """get a file context from the working directory
2264 """get a file context from the working directory
2265
2265
2266 Returns None if file doesn't exist and should be removed."""
2266 Returns None if file doesn't exist and should be removed."""
2267 return self._filectxfn(self._repo, self, path)
2267 return self._filectxfn(self._repo, self, path)
2268
2268
2269 def commit(self):
2269 def commit(self):
2270 """commit context to the repo"""
2270 """commit context to the repo"""
2271 return self._repo.commitctx(self)
2271 return self._repo.commitctx(self)
2272
2272
2273 @propertycache
2273 @propertycache
2274 def _manifest(self):
2274 def _manifest(self):
2275 """generate a manifest based on the return values of filectxfn"""
2275 """generate a manifest based on the return values of filectxfn"""
2276
2276
2277 # keep this simple for now; just worry about p1
2277 # keep this simple for now; just worry about p1
2278 pctx = self._parents[0]
2278 pctx = self._parents[0]
2279 man = pctx.manifest().copy()
2279 man = pctx.manifest().copy()
2280
2280
2281 for f in self._status.modified:
2281 for f in self._status.modified:
2282 p1node = nullid
2282 p1node = nullid
2283 p2node = nullid
2283 p2node = nullid
2284 p = pctx[f].parents() # if file isn't in pctx, check p2?
2284 p = pctx[f].parents() # if file isn't in pctx, check p2?
2285 if len(p) > 0:
2285 if len(p) > 0:
2286 p1node = p[0].filenode()
2286 p1node = p[0].filenode()
2287 if len(p) > 1:
2287 if len(p) > 1:
2288 p2node = p[1].filenode()
2288 p2node = p[1].filenode()
2289 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2289 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2290
2290
2291 for f in self._status.added:
2291 for f in self._status.added:
2292 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2292 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2293
2293
2294 for f in self._status.removed:
2294 for f in self._status.removed:
2295 if f in man:
2295 if f in man:
2296 del man[f]
2296 del man[f]
2297
2297
2298 return man
2298 return man
2299
2299
2300 @propertycache
2300 @propertycache
2301 def _status(self):
2301 def _status(self):
2302 """Calculate exact status from ``files`` specified at construction
2302 """Calculate exact status from ``files`` specified at construction
2303 """
2303 """
2304 man1 = self.p1().manifest()
2304 man1 = self.p1().manifest()
2305 p2 = self._parents[1]
2305 p2 = self._parents[1]
2306 # "1 < len(self._parents)" can't be used for checking
2306 # "1 < len(self._parents)" can't be used for checking
2307 # existence of the 2nd parent, because "memctx._parents" is
2307 # existence of the 2nd parent, because "memctx._parents" is
2308 # explicitly initialized by the list, of which length is 2.
2308 # explicitly initialized by the list, of which length is 2.
2309 if p2.node() != nullid:
2309 if p2.node() != nullid:
2310 man2 = p2.manifest()
2310 man2 = p2.manifest()
2311 managing = lambda f: f in man1 or f in man2
2311 managing = lambda f: f in man1 or f in man2
2312 else:
2312 else:
2313 managing = lambda f: f in man1
2313 managing = lambda f: f in man1
2314
2314
2315 modified, added, removed = [], [], []
2315 modified, added, removed = [], [], []
2316 for f in self._files:
2316 for f in self._files:
2317 if not managing(f):
2317 if not managing(f):
2318 added.append(f)
2318 added.append(f)
2319 elif self[f]:
2319 elif self[f]:
2320 modified.append(f)
2320 modified.append(f)
2321 else:
2321 else:
2322 removed.append(f)
2322 removed.append(f)
2323
2323
2324 return scmutil.status(modified, added, removed, [], [], [], [])
2324 return scmutil.status(modified, added, removed, [], [], [], [])
2325
2325
2326 class memfilectx(committablefilectx):
2326 class memfilectx(committablefilectx):
2327 """memfilectx represents an in-memory file to commit.
2327 """memfilectx represents an in-memory file to commit.
2328
2328
2329 See memctx and committablefilectx for more details.
2329 See memctx and committablefilectx for more details.
2330 """
2330 """
2331 def __init__(self, repo, path, data, islink=False,
2331 def __init__(self, repo, path, data, islink=False,
2332 isexec=False, copied=None, memctx=None):
2332 isexec=False, copied=None, memctx=None):
2333 """
2333 """
2334 path is the normalized file path relative to repository root.
2334 path is the normalized file path relative to repository root.
2335 data is the file content as a string.
2335 data is the file content as a string.
2336 islink is True if the file is a symbolic link.
2336 islink is True if the file is a symbolic link.
2337 isexec is True if the file is executable.
2337 isexec is True if the file is executable.
2338 copied is the source file path if current file was copied in the
2338 copied is the source file path if current file was copied in the
2339 revision being committed, or None."""
2339 revision being committed, or None."""
2340 super(memfilectx, self).__init__(repo, path, None, memctx)
2340 super(memfilectx, self).__init__(repo, path, None, memctx)
2341 self._data = data
2341 self._data = data
2342 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2342 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2343 self._copied = None
2343 self._copied = None
2344 if copied:
2344 if copied:
2345 self._copied = (copied, nullid)
2345 self._copied = (copied, nullid)
2346
2346
2347 def data(self):
2347 def data(self):
2348 return self._data
2348 return self._data
2349
2349
2350 def remove(self, ignoremissing=False):
2350 def remove(self, ignoremissing=False):
2351 """wraps unlink for a repo's working directory"""
2351 """wraps unlink for a repo's working directory"""
2352 # need to figure out what to do here
2352 # need to figure out what to do here
2353 del self._changectx[self._path]
2353 del self._changectx[self._path]
2354
2354
2355 def write(self, data, flags):
2355 def write(self, data, flags):
2356 """wraps repo.wwrite"""
2356 """wraps repo.wwrite"""
2357 self._data = data
2357 self._data = data
2358
2358
2359 class overlayfilectx(committablefilectx):
2359 class overlayfilectx(committablefilectx):
2360 """Like memfilectx but take an original filectx and optional parameters to
2360 """Like memfilectx but take an original filectx and optional parameters to
2361 override parts of it. This is useful when fctx.data() is expensive (i.e.
2361 override parts of it. This is useful when fctx.data() is expensive (i.e.
2362 flag processor is expensive) and raw data, flags, and filenode could be
2362 flag processor is expensive) and raw data, flags, and filenode could be
2363 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2363 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2364 """
2364 """
2365
2365
2366 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2366 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2367 copied=None, ctx=None):
2367 copied=None, ctx=None):
2368 """originalfctx: filecontext to duplicate
2368 """originalfctx: filecontext to duplicate
2369
2369
2370 datafunc: None or a function to override data (file content). It is a
2370 datafunc: None or a function to override data (file content). It is a
2371 function to be lazy. path, flags, copied, ctx: None or overridden value
2371 function to be lazy. path, flags, copied, ctx: None or overridden value
2372
2372
2373 copied could be (path, rev), or False. copied could also be just path,
2373 copied could be (path, rev), or False. copied could also be just path,
2374 and will be converted to (path, nullid). This simplifies some callers.
2374 and will be converted to (path, nullid). This simplifies some callers.
2375 """
2375 """
2376
2376
2377 if path is None:
2377 if path is None:
2378 path = originalfctx.path()
2378 path = originalfctx.path()
2379 if ctx is None:
2379 if ctx is None:
2380 ctx = originalfctx.changectx()
2380 ctx = originalfctx.changectx()
2381 ctxmatch = lambda: True
2381 ctxmatch = lambda: True
2382 else:
2382 else:
2383 ctxmatch = lambda: ctx == originalfctx.changectx()
2383 ctxmatch = lambda: ctx == originalfctx.changectx()
2384
2384
2385 repo = originalfctx.repo()
2385 repo = originalfctx.repo()
2386 flog = originalfctx.filelog()
2386 flog = originalfctx.filelog()
2387 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2387 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2388
2388
2389 if copied is None:
2389 if copied is None:
2390 copied = originalfctx.renamed()
2390 copied = originalfctx.renamed()
2391 copiedmatch = lambda: True
2391 copiedmatch = lambda: True
2392 else:
2392 else:
2393 if copied and not isinstance(copied, tuple):
2393 if copied and not isinstance(copied, tuple):
2394 # repo._filecommit will recalculate copyrev so nullid is okay
2394 # repo._filecommit will recalculate copyrev so nullid is okay
2395 copied = (copied, nullid)
2395 copied = (copied, nullid)
2396 copiedmatch = lambda: copied == originalfctx.renamed()
2396 copiedmatch = lambda: copied == originalfctx.renamed()
2397
2397
2398 # When data, copied (could affect data), ctx (could affect filelog
2398 # When data, copied (could affect data), ctx (could affect filelog
2399 # parents) are not overridden, rawdata, rawflags, and filenode may be
2399 # parents) are not overridden, rawdata, rawflags, and filenode may be
2400 # reused (repo._filecommit should double check filelog parents).
2400 # reused (repo._filecommit should double check filelog parents).
2401 #
2401 #
2402 # path, flags are not hashed in filelog (but in manifestlog) so they do
2402 # path, flags are not hashed in filelog (but in manifestlog) so they do
2403 # not affect reusable here.
2403 # not affect reusable here.
2404 #
2404 #
2405 # If ctx or copied is overridden to a same value with originalfctx,
2405 # If ctx or copied is overridden to a same value with originalfctx,
2406 # still consider it's reusable. originalfctx.renamed() may be a bit
2406 # still consider it's reusable. originalfctx.renamed() may be a bit
2407 # expensive so it's not called unless necessary. Assuming datafunc is
2407 # expensive so it's not called unless necessary. Assuming datafunc is
2408 # always expensive, do not call it for this "reusable" test.
2408 # always expensive, do not call it for this "reusable" test.
2409 reusable = datafunc is None and ctxmatch() and copiedmatch()
2409 reusable = datafunc is None and ctxmatch() and copiedmatch()
2410
2410
2411 if datafunc is None:
2411 if datafunc is None:
2412 datafunc = originalfctx.data
2412 datafunc = originalfctx.data
2413 if flags is None:
2413 if flags is None:
2414 flags = originalfctx.flags()
2414 flags = originalfctx.flags()
2415
2415
2416 self._datafunc = datafunc
2416 self._datafunc = datafunc
2417 self._flags = flags
2417 self._flags = flags
2418 self._copied = copied
2418 self._copied = copied
2419
2419
2420 if reusable:
2420 if reusable:
2421 # copy extra fields from originalfctx
2421 # copy extra fields from originalfctx
2422 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2422 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2423 for attr in attrs:
2423 for attr_ in attrs:
2424 if util.safehasattr(originalfctx, attr):
2424 if util.safehasattr(originalfctx, attr_):
2425 setattr(self, attr, getattr(originalfctx, attr))
2425 setattr(self, attr_, getattr(originalfctx, attr_))
2426
2426
2427 def data(self):
2427 def data(self):
2428 return self._datafunc()
2428 return self._datafunc()
2429
2429
2430 class metadataonlyctx(committablectx):
2430 class metadataonlyctx(committablectx):
2431 """Like memctx but it's reusing the manifest of different commit.
2431 """Like memctx but it's reusing the manifest of different commit.
2432 Intended to be used by lightweight operations that are creating
2432 Intended to be used by lightweight operations that are creating
2433 metadata-only changes.
2433 metadata-only changes.
2434
2434
2435 Revision information is supplied at initialization time. 'repo' is the
2435 Revision information is supplied at initialization time. 'repo' is the
2436 current localrepo, 'ctx' is original revision which manifest we're reuisng
2436 current localrepo, 'ctx' is original revision which manifest we're reuisng
2437 'parents' is a sequence of two parent revisions identifiers (pass None for
2437 'parents' is a sequence of two parent revisions identifiers (pass None for
2438 every missing parent), 'text' is the commit.
2438 every missing parent), 'text' is the commit.
2439
2439
2440 user receives the committer name and defaults to current repository
2440 user receives the committer name and defaults to current repository
2441 username, date is the commit date in any format supported by
2441 username, date is the commit date in any format supported by
2442 util.parsedate() and defaults to current date, extra is a dictionary of
2442 util.parsedate() and defaults to current date, extra is a dictionary of
2443 metadata or is left empty.
2443 metadata or is left empty.
2444 """
2444 """
2445 def __new__(cls, repo, originalctx, *args, **kwargs):
2445 def __new__(cls, repo, originalctx, *args, **kwargs):
2446 return super(metadataonlyctx, cls).__new__(cls, repo)
2446 return super(metadataonlyctx, cls).__new__(cls, repo)
2447
2447
2448 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2448 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2449 date=None, extra=None, editor=False):
2449 date=None, extra=None, editor=False):
2450 if text is None:
2450 if text is None:
2451 text = originalctx.description()
2451 text = originalctx.description()
2452 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2452 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2453 self._rev = None
2453 self._rev = None
2454 self._node = None
2454 self._node = None
2455 self._originalctx = originalctx
2455 self._originalctx = originalctx
2456 self._manifestnode = originalctx.manifestnode()
2456 self._manifestnode = originalctx.manifestnode()
2457 if parents is None:
2457 if parents is None:
2458 parents = originalctx.parents()
2458 parents = originalctx.parents()
2459 else:
2459 else:
2460 parents = [repo[p] for p in parents if p is not None]
2460 parents = [repo[p] for p in parents if p is not None]
2461 parents = parents[:]
2461 parents = parents[:]
2462 while len(parents) < 2:
2462 while len(parents) < 2:
2463 parents.append(repo[nullid])
2463 parents.append(repo[nullid])
2464 p1, p2 = self._parents = parents
2464 p1, p2 = self._parents = parents
2465
2465
2466 # sanity check to ensure that the reused manifest parents are
2466 # sanity check to ensure that the reused manifest parents are
2467 # manifests of our commit parents
2467 # manifests of our commit parents
2468 mp1, mp2 = self.manifestctx().parents
2468 mp1, mp2 = self.manifestctx().parents
2469 if p1 != nullid and p1.manifestnode() != mp1:
2469 if p1 != nullid and p1.manifestnode() != mp1:
2470 raise RuntimeError('can\'t reuse the manifest: '
2470 raise RuntimeError('can\'t reuse the manifest: '
2471 'its p1 doesn\'t match the new ctx p1')
2471 'its p1 doesn\'t match the new ctx p1')
2472 if p2 != nullid and p2.manifestnode() != mp2:
2472 if p2 != nullid and p2.manifestnode() != mp2:
2473 raise RuntimeError('can\'t reuse the manifest: '
2473 raise RuntimeError('can\'t reuse the manifest: '
2474 'its p2 doesn\'t match the new ctx p2')
2474 'its p2 doesn\'t match the new ctx p2')
2475
2475
2476 self._files = originalctx.files()
2476 self._files = originalctx.files()
2477 self.substate = {}
2477 self.substate = {}
2478
2478
2479 if editor:
2479 if editor:
2480 self._text = editor(self._repo, self, [])
2480 self._text = editor(self._repo, self, [])
2481 self._repo.savecommitmessage(self._text)
2481 self._repo.savecommitmessage(self._text)
2482
2482
2483 def manifestnode(self):
2483 def manifestnode(self):
2484 return self._manifestnode
2484 return self._manifestnode
2485
2485
2486 @property
2486 @property
2487 def _manifestctx(self):
2487 def _manifestctx(self):
2488 return self._repo.manifestlog[self._manifestnode]
2488 return self._repo.manifestlog[self._manifestnode]
2489
2489
2490 def filectx(self, path, filelog=None):
2490 def filectx(self, path, filelog=None):
2491 return self._originalctx.filectx(path, filelog=filelog)
2491 return self._originalctx.filectx(path, filelog=filelog)
2492
2492
2493 def commit(self):
2493 def commit(self):
2494 """commit context to the repo"""
2494 """commit context to the repo"""
2495 return self._repo.commitctx(self)
2495 return self._repo.commitctx(self)
2496
2496
2497 @property
2497 @property
2498 def _manifest(self):
2498 def _manifest(self):
2499 return self._originalctx.manifest()
2499 return self._originalctx.manifest()
2500
2500
2501 @propertycache
2501 @propertycache
2502 def _status(self):
2502 def _status(self):
2503 """Calculate exact status from ``files`` specified in the ``origctx``
2503 """Calculate exact status from ``files`` specified in the ``origctx``
2504 and parents manifests.
2504 and parents manifests.
2505 """
2505 """
2506 man1 = self.p1().manifest()
2506 man1 = self.p1().manifest()
2507 p2 = self._parents[1]
2507 p2 = self._parents[1]
2508 # "1 < len(self._parents)" can't be used for checking
2508 # "1 < len(self._parents)" can't be used for checking
2509 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2509 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2510 # explicitly initialized by the list, of which length is 2.
2510 # explicitly initialized by the list, of which length is 2.
2511 if p2.node() != nullid:
2511 if p2.node() != nullid:
2512 man2 = p2.manifest()
2512 man2 = p2.manifest()
2513 managing = lambda f: f in man1 or f in man2
2513 managing = lambda f: f in man1 or f in man2
2514 else:
2514 else:
2515 managing = lambda f: f in man1
2515 managing = lambda f: f in man1
2516
2516
2517 modified, added, removed = [], [], []
2517 modified, added, removed = [], [], []
2518 for f in self._files:
2518 for f in self._files:
2519 if not managing(f):
2519 if not managing(f):
2520 added.append(f)
2520 added.append(f)
2521 elif f in self:
2521 elif f in self:
2522 modified.append(f)
2522 modified.append(f)
2523 else:
2523 else:
2524 removed.append(f)
2524 removed.append(f)
2525
2525
2526 return scmutil.status(modified, added, removed, [], [], [], [])
2526 return scmutil.status(modified, added, removed, [], [], [], [])
2527
2527
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path):
        # absolute or cwd-relative filesystem path of the file
        self._path = path

    def cmp(self, otherfilectx):
        """Return True if this file's contents differ from the other's."""
        return self.data() != otherfilectx.data()

    def path(self):
        """Return the filesystem path this context was created with."""
        return self._path

    def flags(self):
        """Arbitrary on-disk files carry no exec/symlink flags; always ''."""
        return ''

    def data(self):
        """Return the file's contents as bytes."""
        return util.readfile(self._path)

    def decodeddata(self):
        """Return the raw on-disk bytes, bypassing any decode filters."""
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        """Delete the file from disk."""
        util.unlink(self._path)

    def write(self, data, flags):
        """Write ``data`` (bytes) to the file; flags are not supported."""
        assert not flags
        # Open in binary mode: ``data`` is bytes and all reads above use
        # binary mode, so a text-mode ("w") write would raise on Python 3
        # and mangle line endings on Windows.
        with open(self._path, "wb") as f:
            f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now