##// END OF EJS Templates
py3: define __bytes__ for basefilectx class...
Pulkit Goyal -
r33019:daccadd7 default
parent child Browse files
Show More
@@ -1,2306 +1,2312 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 nullid,
21 nullid,
22 nullrev,
22 nullrev,
23 short,
23 short,
24 wdirid,
24 wdirid,
25 wdirnodes,
25 wdirnodes,
26 wdirrev,
26 wdirrev,
27 )
27 )
28 from . import (
28 from . import (
29 encoding,
29 encoding,
30 error,
30 error,
31 fileset,
31 fileset,
32 match as matchmod,
32 match as matchmod,
33 mdiff,
33 mdiff,
34 obsolete as obsmod,
34 obsolete as obsmod,
35 patch,
35 patch,
36 phases,
36 phases,
37 pycompat,
37 pycompat,
38 repoview,
38 repoview,
39 revlog,
39 revlog,
40 scmutil,
40 scmutil,
41 subrepo,
41 subrepo,
42 util,
42 util,
43 )
43 )
44
44
# Shorthand for the cached-property decorator used throughout this module.
propertycache = util.propertycache

# Predicate: returns a truthy match object when the string contains any
# byte outside the printable ASCII range 0x21-0x7f.  Used below to decide
# whether a failed changeid looks like a binary node id that should be
# hex-encoded before being shown in an error message.
nonascii = re.compile(r'[^\x21-\x7f]').search
48
48
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # Passing an existing context through returns it unchanged, so
        # callers may write repo[ctx] without paying for a copy.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        # Short hex node id; decoded to a native (unicode) str on Python 3.
        r = short(self.node())
        if pycompat.ispy3:
            return r.decode('ascii')
        return r

    def __bytes__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts of different concrete types never compare equal, even
        # for the same revision number.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # Membership means "file tracked in this revision's manifest".
        return key in self._manifest

    def __getitem__(self, key):
        # ctx['path'] yields a file context for that path.
        return self.filectx(key)

    def __iter__(self):
        # Iterates over tracked file names.
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        # diff() maps filename -> ((node1, flag1), (node2, flag2)), with a
        # value of None meaning "present and identical" (clean).
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # Parsed .hgsubstate mapping for this revision (lazy, cached).
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        # Recorded revision of the subrepo at subpath.
        return self.substate[subpath][1]

    # Simple accessors over the cached revision data.
    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        # Anything above the public phase may still be rewritten.
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but its ancestors are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changesets.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # Synthesize a null-revision context when there is no second parent,
        # so callers can treat p2() uniformly.
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        """Return (filenode, flags) for path, using the cheapest already
        loaded source: full manifest, manifest delta, or a targeted
        manifestlog lookup.  Raises ManifestLookupError when absent."""
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # Missing files simply have no flags.
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        # Build a matcher rooted at the repo, bound to this context.
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        # NOTE(review): 'reversed' shadows the builtin reversed() inside
        # this method.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        # Keep every status list deterministically ordered.
        for l in r:
            l.sort()

        return r
387
387
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    filtername = repo.filtername
    if not filtername.startswith('visible'):
        # Generic case: name the filter subset that excluded the revision.
        text = _("filtered revision '%s' (not in '%s' subset)")
        return error.FilteredRepoLookupError(text % (changeid, filtername))
    # The revision is hidden (obsolete); point the user at --hidden.
    return error.FilteredRepoLookupError(
        _("hidden revision '%s'") % changeid,
        hint=_('use --hidden to access hidden revisions'))
400
400
401 class changectx(basectx):
401 class changectx(basectx):
402 """A changecontext object makes access to data related to a particular
402 """A changecontext object makes access to data related to a particular
403 changeset convenient. It represents a read-only context already present in
403 changeset convenient. It represents a read-only context already present in
404 the repo."""
404 the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag

        Resolution is tried in a fixed order: existing context, int rev,
        'null', 'tip', '.', 20-byte binary node, decimal rev string,
        40-char hex node, registered names (bookmarks/tags/branches), and
        finally an unambiguous hex node prefix.  Filtered-revision errors
        are converted to user-facing messages at the end.
        """

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            # Python 2 only: a long revision number behaves like its
            # string form.
            if not pycompat.ispy3 and isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            # A 20-byte string is treated as a binary node id.
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            # Decimal revision number (possibly negative, counted from tip).
            try:
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            # Full 40-character hex node id.
            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            # Last resort: unambiguous hex node prefix.
            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # Binary-looking node ids are hex-encoded so the error
                # message below stays printable.
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)
512
512
513 def __hash__(self):
513 def __hash__(self):
514 try:
514 try:
515 return hash(self._rev)
515 return hash(self._rev)
516 except AttributeError:
516 except AttributeError:
517 return id(self)
517 return id(self)
518
518
519 def __nonzero__(self):
519 def __nonzero__(self):
520 return self._rev != nullrev
520 return self._rev != nullrev
521
521
522 __bool__ = __nonzero__
522 __bool__ = __nonzero__
523
523
    @propertycache
    def _changeset(self):
        # Parsed changelog entry for this revision (lazy, cached on first
        # access by propertycache).
        return self._repo.changelog.changelogrevision(self.rev())
527
527
    @propertycache
    def _manifest(self):
        # Full manifest for this changeset, read through the manifest
        # context (lazy, cached).
        return self._manifestctx.read()
531
531
    @property
    def _manifestctx(self):
        # Manifest context for this changeset's manifest node.  A plain
        # (uncached) property, unlike the propertycache attributes around it.
        return self._repo.manifestlog[self._changeset.manifest]
535
535
    @propertycache
    def _manifestdelta(self):
        # Manifest delta for this changeset; consulted by basectx._fileinfo
        # for cheap single-file lookups without reading the full manifest.
        return self._manifestctx.readdelta()
539
539
540 @propertycache
540 @propertycache
541 def _parents(self):
541 def _parents(self):
542 repo = self._repo
542 repo = self._repo
543 p1, p2 = repo.changelog.parentrevs(self._rev)
543 p1, p2 = repo.changelog.parentrevs(self._rev)
544 if p2 == nullrev:
544 if p2 == nullrev:
545 return [changectx(repo, p1)]
545 return [changectx(repo, p1)]
546 return [changectx(repo, p1), changectx(repo, p2)]
546 return [changectx(repo, p1), changectx(repo, p2)]
547
547
548 def changeset(self):
548 def changeset(self):
549 c = self._changeset
549 c = self._changeset
550 return (
550 return (
551 c.manifest,
551 c.manifest,
552 c.user,
552 c.user,
553 c.date,
553 c.date,
554 c.files,
554 c.files,
555 c.description,
555 c.description,
556 c.extra,
556 c.extra,
557 )
557 )
558 def manifestnode(self):
558 def manifestnode(self):
559 return self._changeset.manifest
559 return self._changeset.manifest
560
560
561 def user(self):
561 def user(self):
562 return self._changeset.user
562 return self._changeset.user
563 def date(self):
563 def date(self):
564 return self._changeset.date
564 return self._changeset.date
565 def files(self):
565 def files(self):
566 return self._changeset.files
566 return self._changeset.files
567 def description(self):
567 def description(self):
568 return self._changeset.description
568 return self._changeset.description
569 def branch(self):
569 def branch(self):
570 return encoding.tolocal(self._changeset.extra.get("branch"))
570 return encoding.tolocal(self._changeset.extra.get("branch"))
571 def closesbranch(self):
571 def closesbranch(self):
572 return 'close' in self._changeset.extra
572 return 'close' in self._changeset.extra
573 def extra(self):
573 def extra(self):
574 return self._changeset.extra
574 return self._changeset.extra
575 def tags(self):
575 def tags(self):
576 return self._repo.nodetags(self._node)
576 return self._repo.nodetags(self._node)
577 def bookmarks(self):
577 def bookmarks(self):
578 return self._repo.nodebookmarks(self._node)
578 return self._repo.nodebookmarks(self._node)
579 def phase(self):
579 def phase(self):
580 return self._repo._phasecache.phase(self._repo, self._rev)
580 return self._repo._phasecache.phase(self._repo, self._rev)
581 def hidden(self):
581 def hidden(self):
582 return self._rev in repoview.filterrevs(self._repo, 'visible')
582 return self._rev in repoview.filterrevs(self._repo, 'visible')
583
583
584 def children(self):
584 def children(self):
585 """return contexts for each child changeset"""
585 """return contexts for each child changeset"""
586 c = self._repo.changelog.children(self._node)
586 c = self._repo.changelog.children(self._node)
587 return [changectx(self._repo, x) for x in c]
587 return [changectx(self._repo, x) for x in c]
588
588
589 def ancestors(self):
589 def ancestors(self):
590 for a in self._repo.changelog.ancestors([self._rev]):
590 for a in self._repo.changelog.ancestors([self._rev]):
591 yield changectx(self._repo, a)
591 yield changectx(self._repo, a)
592
592
593 def descendants(self):
593 def descendants(self):
594 for d in self._repo.changelog.descendants([self._rev]):
594 for d in self._repo.changelog.descendants([self._rev]):
595 yield changectx(self._repo, d)
595 yield changectx(self._repo, d)
596
596
597 def filectx(self, path, fileid=None, filelog=None):
597 def filectx(self, path, fileid=None, filelog=None):
598 """get a file context from this changeset"""
598 """get a file context from this changeset"""
599 if fileid is None:
599 if fileid is None:
600 fileid = self.filenode(path)
600 fileid = self.filenode(path)
601 return filectx(self._repo, path, fileid=fileid,
601 return filectx(self._repo, path, fileid=fileid,
602 changectx=self, filelog=filelog)
602 changectx=self, filelog=filelog)
603
603
604 def ancestor(self, c2, warn=False):
604 def ancestor(self, c2, warn=False):
605 """return the "best" ancestor context of self and c2
605 """return the "best" ancestor context of self and c2
606
606
607 If there are multiple candidates, it will show a message and check
607 If there are multiple candidates, it will show a message and check
608 merge.preferancestor configuration before falling back to the
608 merge.preferancestor configuration before falling back to the
609 revlog ancestor."""
609 revlog ancestor."""
610 # deal with workingctxs
610 # deal with workingctxs
611 n2 = c2._node
611 n2 = c2._node
612 if n2 is None:
612 if n2 is None:
613 n2 = c2._parents[0]._node
613 n2 = c2._parents[0]._node
614 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
614 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
615 if not cahs:
615 if not cahs:
616 anc = nullid
616 anc = nullid
617 elif len(cahs) == 1:
617 elif len(cahs) == 1:
618 anc = cahs[0]
618 anc = cahs[0]
619 else:
619 else:
620 # experimental config: merge.preferancestor
620 # experimental config: merge.preferancestor
621 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
621 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
622 try:
622 try:
623 ctx = changectx(self._repo, r)
623 ctx = changectx(self._repo, r)
624 except error.RepoLookupError:
624 except error.RepoLookupError:
625 continue
625 continue
626 anc = ctx.node()
626 anc = ctx.node()
627 if anc in cahs:
627 if anc in cahs:
628 break
628 break
629 else:
629 else:
630 anc = self._repo.changelog.ancestor(self._node, n2)
630 anc = self._repo.changelog.ancestor(self._node, n2)
631 if warn:
631 if warn:
632 self._repo.ui.status(
632 self._repo.ui.status(
633 (_("note: using %s as ancestor of %s and %s\n") %
633 (_("note: using %s as ancestor of %s and %s\n") %
634 (short(anc), short(self._node), short(n2))) +
634 (short(anc), short(self._node), short(n2))) +
635 ''.join(_(" alternatively, use --config "
635 ''.join(_(" alternatively, use --config "
636 "merge.preferancestor=%s\n") %
636 "merge.preferancestor=%s\n") %
637 short(n) for n in sorted(cahs) if n != anc))
637 short(n) for n in sorted(cahs) if n != anc))
638 return changectx(self._repo, anc)
638 return changectx(self._repo, anc)
639
639
640 def descendant(self, other):
640 def descendant(self, other):
641 """True if other is descendant of this changeset"""
641 """True if other is descendant of this changeset"""
642 return self._repo.changelog.descendant(self._rev, other._rev)
642 return self._repo.changelog.descendant(self._rev, other._rev)
643
643
644 def walk(self, match):
644 def walk(self, match):
645 '''Generates matching file names.'''
645 '''Generates matching file names.'''
646
646
647 # Wrap match.bad method to have message with nodeid
647 # Wrap match.bad method to have message with nodeid
648 def bad(fn, msg):
648 def bad(fn, msg):
649 # The manifest doesn't know about subrepos, so don't complain about
649 # The manifest doesn't know about subrepos, so don't complain about
650 # paths into valid subrepos.
650 # paths into valid subrepos.
651 if any(fn == s or fn.startswith(s + '/')
651 if any(fn == s or fn.startswith(s + '/')
652 for s in self.substate):
652 for s in self.substate):
653 return
653 return
654 match.bad(fn, _('no such file in rev %s') % self)
654 match.bad(fn, _('no such file in rev %s') % self)
655
655
656 m = matchmod.badmatch(match, bad)
656 m = matchmod.badmatch(match, bad)
657 return self._manifest.walk(m)
657 return self._manifest.walk(m)
658
658
659 def matches(self, match):
659 def matches(self, match):
660 return self.walk(match)
660 return self.walk(match)
661
661
662 class basefilectx(object):
662 class basefilectx(object):
663 """A filecontext object represents the common logic for its children:
663 """A filecontext object represents the common logic for its children:
664 filectx: read-only access to a filerevision that is already present
664 filectx: read-only access to a filerevision that is already present
665 in the repo,
665 in the repo,
666 workingfilectx: a filecontext that represents files from the working
666 workingfilectx: a filecontext that represents files from the working
667 directory,
667 directory,
668 memfilectx: a filecontext that represents files in-memory,
668 memfilectx: a filecontext that represents files in-memory,
669 overlayfilectx: duplicate another filecontext with some fields overridden.
669 overlayfilectx: duplicate another filecontext with some fields overridden.
670 """
670 """
671 @propertycache
671 @propertycache
672 def _filelog(self):
672 def _filelog(self):
673 return self._repo.file(self._path)
673 return self._repo.file(self._path)
674
674
675 @propertycache
675 @propertycache
676 def _changeid(self):
676 def _changeid(self):
677 if r'_changeid' in self.__dict__:
677 if r'_changeid' in self.__dict__:
678 return self._changeid
678 return self._changeid
679 elif r'_changectx' in self.__dict__:
679 elif r'_changectx' in self.__dict__:
680 return self._changectx.rev()
680 return self._changectx.rev()
681 elif r'_descendantrev' in self.__dict__:
681 elif r'_descendantrev' in self.__dict__:
682 # this file context was created from a revision with a known
682 # this file context was created from a revision with a known
683 # descendant, we can (lazily) correct for linkrev aliases
683 # descendant, we can (lazily) correct for linkrev aliases
684 return self._adjustlinkrev(self._descendantrev)
684 return self._adjustlinkrev(self._descendantrev)
685 else:
685 else:
686 return self._filelog.linkrev(self._filerev)
686 return self._filelog.linkrev(self._filerev)
687
687
688 @propertycache
688 @propertycache
689 def _filenode(self):
689 def _filenode(self):
690 if r'_fileid' in self.__dict__:
690 if r'_fileid' in self.__dict__:
691 return self._filelog.lookup(self._fileid)
691 return self._filelog.lookup(self._fileid)
692 else:
692 else:
693 return self._changectx.filenode(self._path)
693 return self._changectx.filenode(self._path)
694
694
695 @propertycache
695 @propertycache
696 def _filerev(self):
696 def _filerev(self):
697 return self._filelog.rev(self._filenode)
697 return self._filelog.rev(self._filenode)
698
698
699 @propertycache
699 @propertycache
700 def _repopath(self):
700 def _repopath(self):
701 return self._path
701 return self._path
702
702
703 def __nonzero__(self):
703 def __nonzero__(self):
704 try:
704 try:
705 self._filenode
705 self._filenode
706 return True
706 return True
707 except error.LookupError:
707 except error.LookupError:
708 # file is missing
708 # file is missing
709 return False
709 return False
710
710
711 __bool__ = __nonzero__
711 __bool__ = __nonzero__
712
712
713 def __str__(self):
713 def __str__(self):
714 try:
714 try:
715 return "%s@%s" % (self.path(), self._changectx)
715 return "%s@%s" % (self.path(), self._changectx)
716 except error.LookupError:
716 except error.LookupError:
717 return "%s@???" % self.path()
717 return "%s@???" % self.path()
718
718
719 def __bytes__(self):
720 try:
721 return "%s@%s" % (self.path(), self._changectx)
722 except error.LookupError:
723 return "%s@???" % self.path()
724
719 def __repr__(self):
725 def __repr__(self):
720 return "<%s %s>" % (type(self).__name__, str(self))
726 return "<%s %s>" % (type(self).__name__, str(self))
721
727
722 def __hash__(self):
728 def __hash__(self):
723 try:
729 try:
724 return hash((self._path, self._filenode))
730 return hash((self._path, self._filenode))
725 except AttributeError:
731 except AttributeError:
726 return id(self)
732 return id(self)
727
733
728 def __eq__(self, other):
734 def __eq__(self, other):
729 try:
735 try:
730 return (type(self) == type(other) and self._path == other._path
736 return (type(self) == type(other) and self._path == other._path
731 and self._filenode == other._filenode)
737 and self._filenode == other._filenode)
732 except AttributeError:
738 except AttributeError:
733 return False
739 return False
734
740
735 def __ne__(self, other):
741 def __ne__(self, other):
736 return not (self == other)
742 return not (self == other)
737
743
738 def filerev(self):
744 def filerev(self):
739 return self._filerev
745 return self._filerev
740 def filenode(self):
746 def filenode(self):
741 return self._filenode
747 return self._filenode
742 @propertycache
748 @propertycache
743 def _flags(self):
749 def _flags(self):
744 return self._changectx.flags(self._path)
750 return self._changectx.flags(self._path)
745 def flags(self):
751 def flags(self):
746 return self._flags
752 return self._flags
747 def filelog(self):
753 def filelog(self):
748 return self._filelog
754 return self._filelog
749 def rev(self):
755 def rev(self):
750 return self._changeid
756 return self._changeid
751 def linkrev(self):
757 def linkrev(self):
752 return self._filelog.linkrev(self._filerev)
758 return self._filelog.linkrev(self._filerev)
753 def node(self):
759 def node(self):
754 return self._changectx.node()
760 return self._changectx.node()
755 def hex(self):
761 def hex(self):
756 return self._changectx.hex()
762 return self._changectx.hex()
757 def user(self):
763 def user(self):
758 return self._changectx.user()
764 return self._changectx.user()
759 def date(self):
765 def date(self):
760 return self._changectx.date()
766 return self._changectx.date()
761 def files(self):
767 def files(self):
762 return self._changectx.files()
768 return self._changectx.files()
763 def description(self):
769 def description(self):
764 return self._changectx.description()
770 return self._changectx.description()
765 def branch(self):
771 def branch(self):
766 return self._changectx.branch()
772 return self._changectx.branch()
767 def extra(self):
773 def extra(self):
768 return self._changectx.extra()
774 return self._changectx.extra()
769 def phase(self):
775 def phase(self):
770 return self._changectx.phase()
776 return self._changectx.phase()
771 def phasestr(self):
777 def phasestr(self):
772 return self._changectx.phasestr()
778 return self._changectx.phasestr()
773 def manifest(self):
779 def manifest(self):
774 return self._changectx.manifest()
780 return self._changectx.manifest()
775 def changectx(self):
781 def changectx(self):
776 return self._changectx
782 return self._changectx
777 def renamed(self):
783 def renamed(self):
778 return self._copied
784 return self._copied
779 def repo(self):
785 def repo(self):
780 return self._repo
786 return self._repo
781 def size(self):
787 def size(self):
782 return len(self.data())
788 return len(self.data())
783
789
784 def path(self):
790 def path(self):
785 return self._path
791 return self._path
786
792
787 def isbinary(self):
793 def isbinary(self):
788 try:
794 try:
789 return util.binary(self.data())
795 return util.binary(self.data())
790 except IOError:
796 except IOError:
791 return False
797 return False
792 def isexec(self):
798 def isexec(self):
793 return 'x' in self.flags()
799 return 'x' in self.flags()
794 def islink(self):
800 def islink(self):
795 return 'l' in self.flags()
801 return 'l' in self.flags()
796
802
797 def isabsent(self):
803 def isabsent(self):
798 """whether this filectx represents a file not in self._changectx
804 """whether this filectx represents a file not in self._changectx
799
805
800 This is mainly for merge code to detect change/delete conflicts. This is
806 This is mainly for merge code to detect change/delete conflicts. This is
801 expected to be True for all subclasses of basectx."""
807 expected to be True for all subclasses of basectx."""
802 return False
808 return False
803
809
804 _customcmp = False
810 _customcmp = False
805 def cmp(self, fctx):
811 def cmp(self, fctx):
806 """compare with other file context
812 """compare with other file context
807
813
808 returns True if different than fctx.
814 returns True if different than fctx.
809 """
815 """
810 if fctx._customcmp:
816 if fctx._customcmp:
811 return fctx.cmp(self)
817 return fctx.cmp(self)
812
818
813 if (fctx._filenode is None
819 if (fctx._filenode is None
814 and (self._repo._encodefilterpats
820 and (self._repo._encodefilterpats
815 # if file data starts with '\1\n', empty metadata block is
821 # if file data starts with '\1\n', empty metadata block is
816 # prepended, which adds 4 bytes to filelog.size().
822 # prepended, which adds 4 bytes to filelog.size().
817 or self.size() - 4 == fctx.size())
823 or self.size() - 4 == fctx.size())
818 or self.size() == fctx.size()):
824 or self.size() == fctx.size()):
819 return self._filelog.cmp(self._filenode, fctx.data())
825 return self._filelog.cmp(self._filenode, fctx.data())
820
826
821 return True
827 return True
822
828
823 def _adjustlinkrev(self, srcrev, inclusive=False):
829 def _adjustlinkrev(self, srcrev, inclusive=False):
824 """return the first ancestor of <srcrev> introducing <fnode>
830 """return the first ancestor of <srcrev> introducing <fnode>
825
831
826 If the linkrev of the file revision does not point to an ancestor of
832 If the linkrev of the file revision does not point to an ancestor of
827 srcrev, we'll walk down the ancestors until we find one introducing
833 srcrev, we'll walk down the ancestors until we find one introducing
828 this file revision.
834 this file revision.
829
835
830 :srcrev: the changeset revision we search ancestors from
836 :srcrev: the changeset revision we search ancestors from
831 :inclusive: if true, the src revision will also be checked
837 :inclusive: if true, the src revision will also be checked
832 """
838 """
833 repo = self._repo
839 repo = self._repo
834 cl = repo.unfiltered().changelog
840 cl = repo.unfiltered().changelog
835 mfl = repo.manifestlog
841 mfl = repo.manifestlog
836 # fetch the linkrev
842 # fetch the linkrev
837 lkr = self.linkrev()
843 lkr = self.linkrev()
838 # hack to reuse ancestor computation when searching for renames
844 # hack to reuse ancestor computation when searching for renames
839 memberanc = getattr(self, '_ancestrycontext', None)
845 memberanc = getattr(self, '_ancestrycontext', None)
840 iteranc = None
846 iteranc = None
841 if srcrev is None:
847 if srcrev is None:
842 # wctx case, used by workingfilectx during mergecopy
848 # wctx case, used by workingfilectx during mergecopy
843 revs = [p.rev() for p in self._repo[None].parents()]
849 revs = [p.rev() for p in self._repo[None].parents()]
844 inclusive = True # we skipped the real (revless) source
850 inclusive = True # we skipped the real (revless) source
845 else:
851 else:
846 revs = [srcrev]
852 revs = [srcrev]
847 if memberanc is None:
853 if memberanc is None:
848 memberanc = iteranc = cl.ancestors(revs, lkr,
854 memberanc = iteranc = cl.ancestors(revs, lkr,
849 inclusive=inclusive)
855 inclusive=inclusive)
850 # check if this linkrev is an ancestor of srcrev
856 # check if this linkrev is an ancestor of srcrev
851 if lkr not in memberanc:
857 if lkr not in memberanc:
852 if iteranc is None:
858 if iteranc is None:
853 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
859 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
854 fnode = self._filenode
860 fnode = self._filenode
855 path = self._path
861 path = self._path
856 for a in iteranc:
862 for a in iteranc:
857 ac = cl.read(a) # get changeset data (we avoid object creation)
863 ac = cl.read(a) # get changeset data (we avoid object creation)
858 if path in ac[3]: # checking the 'files' field.
864 if path in ac[3]: # checking the 'files' field.
859 # The file has been touched, check if the content is
865 # The file has been touched, check if the content is
860 # similar to the one we search for.
866 # similar to the one we search for.
861 if fnode == mfl[ac[0]].readfast().get(path):
867 if fnode == mfl[ac[0]].readfast().get(path):
862 return a
868 return a
863 # In theory, we should never get out of that loop without a result.
869 # In theory, we should never get out of that loop without a result.
864 # But if manifest uses a buggy file revision (not children of the
870 # But if manifest uses a buggy file revision (not children of the
865 # one it replaces) we could. Such a buggy situation will likely
871 # one it replaces) we could. Such a buggy situation will likely
866 # result is crash somewhere else at to some point.
872 # result is crash somewhere else at to some point.
867 return lkr
873 return lkr
868
874
869 def introrev(self):
875 def introrev(self):
870 """return the rev of the changeset which introduced this file revision
876 """return the rev of the changeset which introduced this file revision
871
877
872 This method is different from linkrev because it take into account the
878 This method is different from linkrev because it take into account the
873 changeset the filectx was created from. It ensures the returned
879 changeset the filectx was created from. It ensures the returned
874 revision is one of its ancestors. This prevents bugs from
880 revision is one of its ancestors. This prevents bugs from
875 'linkrev-shadowing' when a file revision is used by multiple
881 'linkrev-shadowing' when a file revision is used by multiple
876 changesets.
882 changesets.
877 """
883 """
878 lkr = self.linkrev()
884 lkr = self.linkrev()
879 attrs = vars(self)
885 attrs = vars(self)
880 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
886 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
881 if noctx or self.rev() == lkr:
887 if noctx or self.rev() == lkr:
882 return self.linkrev()
888 return self.linkrev()
883 return self._adjustlinkrev(self.rev(), inclusive=True)
889 return self._adjustlinkrev(self.rev(), inclusive=True)
884
890
885 def _parentfilectx(self, path, fileid, filelog):
891 def _parentfilectx(self, path, fileid, filelog):
886 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
892 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
887 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
893 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
888 if '_changeid' in vars(self) or '_changectx' in vars(self):
894 if '_changeid' in vars(self) or '_changectx' in vars(self):
889 # If self is associated with a changeset (probably explicitly
895 # If self is associated with a changeset (probably explicitly
890 # fed), ensure the created filectx is associated with a
896 # fed), ensure the created filectx is associated with a
891 # changeset that is an ancestor of self.changectx.
897 # changeset that is an ancestor of self.changectx.
892 # This lets us later use _adjustlinkrev to get a correct link.
898 # This lets us later use _adjustlinkrev to get a correct link.
893 fctx._descendantrev = self.rev()
899 fctx._descendantrev = self.rev()
894 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
900 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
895 elif '_descendantrev' in vars(self):
901 elif '_descendantrev' in vars(self):
896 # Otherwise propagate _descendantrev if we have one associated.
902 # Otherwise propagate _descendantrev if we have one associated.
897 fctx._descendantrev = self._descendantrev
903 fctx._descendantrev = self._descendantrev
898 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
904 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
899 return fctx
905 return fctx
900
906
901 def parents(self):
907 def parents(self):
902 _path = self._path
908 _path = self._path
903 fl = self._filelog
909 fl = self._filelog
904 parents = self._filelog.parents(self._filenode)
910 parents = self._filelog.parents(self._filenode)
905 pl = [(_path, node, fl) for node in parents if node != nullid]
911 pl = [(_path, node, fl) for node in parents if node != nullid]
906
912
907 r = fl.renamed(self._filenode)
913 r = fl.renamed(self._filenode)
908 if r:
914 if r:
909 # - In the simple rename case, both parent are nullid, pl is empty.
915 # - In the simple rename case, both parent are nullid, pl is empty.
910 # - In case of merge, only one of the parent is null id and should
916 # - In case of merge, only one of the parent is null id and should
911 # be replaced with the rename information. This parent is -always-
917 # be replaced with the rename information. This parent is -always-
912 # the first one.
918 # the first one.
913 #
919 #
914 # As null id have always been filtered out in the previous list
920 # As null id have always been filtered out in the previous list
915 # comprehension, inserting to 0 will always result in "replacing
921 # comprehension, inserting to 0 will always result in "replacing
916 # first nullid parent with rename information.
922 # first nullid parent with rename information.
917 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
923 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
918
924
919 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
925 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
920
926
921 def p1(self):
927 def p1(self):
922 return self.parents()[0]
928 return self.parents()[0]
923
929
924 def p2(self):
930 def p2(self):
925 p = self.parents()
931 p = self.parents()
926 if len(p) == 2:
932 if len(p) == 2:
927 return p[1]
933 return p[1]
928 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
934 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
929
935
930 def annotate(self, follow=False, linenumber=False, skiprevs=None,
936 def annotate(self, follow=False, linenumber=False, skiprevs=None,
931 diffopts=None):
937 diffopts=None):
932 '''returns a list of tuples of ((ctx, number), line) for each line
938 '''returns a list of tuples of ((ctx, number), line) for each line
933 in the file, where ctx is the filectx of the node where
939 in the file, where ctx is the filectx of the node where
934 that line was last changed; if linenumber parameter is true, number is
940 that line was last changed; if linenumber parameter is true, number is
935 the line number at the first appearance in the managed file, otherwise,
941 the line number at the first appearance in the managed file, otherwise,
936 number has a fixed value of False.
942 number has a fixed value of False.
937 '''
943 '''
938
944
939 def lines(text):
945 def lines(text):
940 if text.endswith("\n"):
946 if text.endswith("\n"):
941 return text.count("\n")
947 return text.count("\n")
942 return text.count("\n") + int(bool(text))
948 return text.count("\n") + int(bool(text))
943
949
944 if linenumber:
950 if linenumber:
945 def decorate(text, rev):
951 def decorate(text, rev):
946 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
952 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
947 else:
953 else:
948 def decorate(text, rev):
954 def decorate(text, rev):
949 return ([(rev, False)] * lines(text), text)
955 return ([(rev, False)] * lines(text), text)
950
956
951 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
957 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
952
958
953 def parents(f):
959 def parents(f):
954 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
960 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
955 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
961 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
956 # from the topmost introrev (= srcrev) down to p.linkrev() if it
962 # from the topmost introrev (= srcrev) down to p.linkrev() if it
957 # isn't an ancestor of the srcrev.
963 # isn't an ancestor of the srcrev.
958 f._changeid
964 f._changeid
959 pl = f.parents()
965 pl = f.parents()
960
966
961 # Don't return renamed parents if we aren't following.
967 # Don't return renamed parents if we aren't following.
962 if not follow:
968 if not follow:
963 pl = [p for p in pl if p.path() == f.path()]
969 pl = [p for p in pl if p.path() == f.path()]
964
970
965 # renamed filectx won't have a filelog yet, so set it
971 # renamed filectx won't have a filelog yet, so set it
966 # from the cache to save time
972 # from the cache to save time
967 for p in pl:
973 for p in pl:
968 if not '_filelog' in p.__dict__:
974 if not '_filelog' in p.__dict__:
969 p._filelog = getlog(p.path())
975 p._filelog = getlog(p.path())
970
976
971 return pl
977 return pl
972
978
973 # use linkrev to find the first changeset where self appeared
979 # use linkrev to find the first changeset where self appeared
974 base = self
980 base = self
975 introrev = self.introrev()
981 introrev = self.introrev()
976 if self.rev() != introrev:
982 if self.rev() != introrev:
977 base = self.filectx(self.filenode(), changeid=introrev)
983 base = self.filectx(self.filenode(), changeid=introrev)
978 if getattr(base, '_ancestrycontext', None) is None:
984 if getattr(base, '_ancestrycontext', None) is None:
979 cl = self._repo.changelog
985 cl = self._repo.changelog
980 if introrev is None:
986 if introrev is None:
981 # wctx is not inclusive, but works because _ancestrycontext
987 # wctx is not inclusive, but works because _ancestrycontext
982 # is used to test filelog revisions
988 # is used to test filelog revisions
983 ac = cl.ancestors([p.rev() for p in base.parents()],
989 ac = cl.ancestors([p.rev() for p in base.parents()],
984 inclusive=True)
990 inclusive=True)
985 else:
991 else:
986 ac = cl.ancestors([introrev], inclusive=True)
992 ac = cl.ancestors([introrev], inclusive=True)
987 base._ancestrycontext = ac
993 base._ancestrycontext = ac
988
994
989 # This algorithm would prefer to be recursive, but Python is a
995 # This algorithm would prefer to be recursive, but Python is a
990 # bit recursion-hostile. Instead we do an iterative
996 # bit recursion-hostile. Instead we do an iterative
991 # depth-first search.
997 # depth-first search.
992
998
993 # 1st DFS pre-calculates pcache and needed
999 # 1st DFS pre-calculates pcache and needed
994 visit = [base]
1000 visit = [base]
995 pcache = {}
1001 pcache = {}
996 needed = {base: 1}
1002 needed = {base: 1}
997 while visit:
1003 while visit:
998 f = visit.pop()
1004 f = visit.pop()
999 if f in pcache:
1005 if f in pcache:
1000 continue
1006 continue
1001 pl = parents(f)
1007 pl = parents(f)
1002 pcache[f] = pl
1008 pcache[f] = pl
1003 for p in pl:
1009 for p in pl:
1004 needed[p] = needed.get(p, 0) + 1
1010 needed[p] = needed.get(p, 0) + 1
1005 if p not in pcache:
1011 if p not in pcache:
1006 visit.append(p)
1012 visit.append(p)
1007
1013
1008 # 2nd DFS does the actual annotate
1014 # 2nd DFS does the actual annotate
1009 visit[:] = [base]
1015 visit[:] = [base]
1010 hist = {}
1016 hist = {}
1011 while visit:
1017 while visit:
1012 f = visit[-1]
1018 f = visit[-1]
1013 if f in hist:
1019 if f in hist:
1014 visit.pop()
1020 visit.pop()
1015 continue
1021 continue
1016
1022
1017 ready = True
1023 ready = True
1018 pl = pcache[f]
1024 pl = pcache[f]
1019 for p in pl:
1025 for p in pl:
1020 if p not in hist:
1026 if p not in hist:
1021 ready = False
1027 ready = False
1022 visit.append(p)
1028 visit.append(p)
1023 if ready:
1029 if ready:
1024 visit.pop()
1030 visit.pop()
1025 curr = decorate(f.data(), f)
1031 curr = decorate(f.data(), f)
1026 skipchild = False
1032 skipchild = False
1027 if skiprevs is not None:
1033 if skiprevs is not None:
1028 skipchild = f._changeid in skiprevs
1034 skipchild = f._changeid in skiprevs
1029 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1035 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1030 diffopts)
1036 diffopts)
1031 for p in pl:
1037 for p in pl:
1032 if needed[p] == 1:
1038 if needed[p] == 1:
1033 del hist[p]
1039 del hist[p]
1034 del needed[p]
1040 del needed[p]
1035 else:
1041 else:
1036 needed[p] -= 1
1042 needed[p] -= 1
1037
1043
1038 hist[f] = curr
1044 hist[f] = curr
1039 del pcache[f]
1045 del pcache[f]
1040
1046
1041 return zip(hist[base][0], hist[base][1].splitlines(True))
1047 return zip(hist[base][0], hist[base][1].splitlines(True))
1042
1048
1043 def ancestors(self, followfirst=False):
1049 def ancestors(self, followfirst=False):
1044 visit = {}
1050 visit = {}
1045 c = self
1051 c = self
1046 if followfirst:
1052 if followfirst:
1047 cut = 1
1053 cut = 1
1048 else:
1054 else:
1049 cut = None
1055 cut = None
1050
1056
1051 while True:
1057 while True:
1052 for parent in c.parents()[:cut]:
1058 for parent in c.parents()[:cut]:
1053 visit[(parent.linkrev(), parent.filenode())] = parent
1059 visit[(parent.linkrev(), parent.filenode())] = parent
1054 if not visit:
1060 if not visit:
1055 break
1061 break
1056 c = visit.pop(max(visit))
1062 c = visit.pop(max(visit))
1057 yield c
1063 yield c
1058
1064
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    >>> oldfctx = 'old'
    >>> p1fctx, p2fctx, childfctx = 'p1', 'p2', 'c'
    >>> olddata = 'a\nb\n'
    >>> p1data = 'a\nb\nc\n'
    >>> p2data = 'a\nc\nd\n'
    >>> childdata = 'a\nb2\nc\nc2\nd\n'
    >>> diffopts = mdiff.diffopts()

    >>> def decorate(text, rev):
    ...     return ([(rev, i) for i in xrange(1, text.count('\n') + 1)], text)

    Basic usage:

    >>> oldann = decorate(olddata, oldfctx)
    >>> p1ann = decorate(p1data, p1fctx)
    >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
    >>> p1ann[0]
    [('old', 1), ('old', 2), ('p1', 3)]
    >>> p2ann = decorate(p2data, p2fctx)
    >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
    >>> p2ann[0]
    [('old', 1), ('p2', 2), ('p2', 3)]

    Test with multiple parents (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]

    Test with skipchild (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
    '''
    # Each annotate entry is ([(fctx, lineno), ...], text); diff each
    # parent's text against the child's to find matching blocks.
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            # '=' marks an unchanged block: those child lines inherit the
            # parent's annotation in place.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                # Only hunks where the parent side is at least as long can be
                # mapped 1:1 without repeating a parent line; defer the rest.
                if a2 - a1 >= b2 - b1:
                    for bk in xrange(b1, b2):
                        # Only lines still blamed on the child are rewritten;
                        # lines already claimed by an earlier parent stay put.
                        if child[0][bk][0] == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = parent[0][ak]
                else:
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk][0] == childfctx:
                        # clamping to a2 - 1 repeats the parent's last line
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = parent[0][ak]
    return child
1168
1174
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of locating the file revision must be supplied
        assert not (changeid is None and fileid is None
                    and changectx is None), \
               ("bad args: changeid=%r, fileid=%r, changectx=%r"
                % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog
        if fileid is not None:
            self._fileid = fileid
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # A linkrev may point at any revision in the repository, so on a
            # filtered repository it can reference a hidden changeset. Fall
            # back to the unfiltered repository in that case: changectxs
            # built from a filectx are not used in operations that depend on
            # filtering, and a possibly "incorrect" answer beats crashing.
            # This is a cheap stopgap; a real fix belongs with the general
            # linkrev-vs-filtering problem.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       changeid=changeid, filelog=self._filelog)

    def rawdata(self):
        # revision data with no flag processing applied
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") != "ignore":
                raise error.Abort(
                    _("censored node: %s") % short(self._filenode),
                    hint=_("set censor.policy to ignore errors"))
            # censor.policy == "ignore": pretend the file is empty
            return ""

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """
        rename = self._filelog.renamed(self._filenode)
        if not rename or self.rev() == self.linkrev():
            return rename

        path = self.path()
        fnode = self._filenode
        for parent in self._changectx.parents():
            try:
                if fnode == parent.filenode(path):
                    # a parent already holds this exact file revision, so the
                    # rename happened in an earlier changeset, not this one
                    return None
            except error.LookupError:
                pass
        return rename

    def children(self):
        # hard for renames
        childnodes = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=fn,
                        filelog=self._filelog)
                for fn in childnodes]
1274
1280
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + r"+"

    def __bytes__(self):
        # Must append a bytes literal: bytes + str raises TypeError on
        # Python 3 (on Python 2, b"+" and "+" are the same str type).
        return bytes(self._parents[0]) + b"+"

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # status relative to the dirstate parent, computed lazily
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date lets tests pin the commit date
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        # an uncommitted context has no subrepo revision yet
        return None

    def manifestnode(self):
        # no manifest has been written yet
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        return []

    def bookmarks(self):
        # an uncommitted context inherits its parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1472
1478
1473 class workingctx(committablectx):
1479 class workingctx(committablectx):
1474 """A workingctx object makes access to data related to
1480 """A workingctx object makes access to data related to
1475 the current working directory convenient.
1481 the current working directory convenient.
1476 date - any valid date string or (unixtime, offset), or None.
1482 date - any valid date string or (unixtime, offset), or None.
1477 user - username string, or None.
1483 user - username string, or None.
1478 extra - a dictionary of extra values, or None.
1484 extra - a dictionary of extra values, or None.
1479 changes - a list of file lists as returned by localrepo.status()
1485 changes - a list of file lists as returned by localrepo.status()
1480 or None to use the repository status.
1486 or None to use the repository status.
1481 """
1487 """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # all state handling lives in committablectx; workingctx only adds
        # dirstate-backed behavior in the methods below
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1485
1491
1486 def __iter__(self):
1492 def __iter__(self):
1487 d = self._repo.dirstate
1493 d = self._repo.dirstate
1488 for f in d:
1494 for f in d:
1489 if d[f] != 'r':
1495 if d[f] != 'r':
1490 yield f
1496 yield f
1491
1497
1492 def __contains__(self, key):
1498 def __contains__(self, key):
1493 return self._repo.dirstate[key] not in "?r"
1499 return self._repo.dirstate[key] not in "?r"
1494
1500
    def hex(self):
        # the working directory has no committed node; report the magic
        # working-directory id instead
        return hex(wdirid)
1497
1503
1498 @propertycache
1504 @propertycache
1499 def _parents(self):
1505 def _parents(self):
1500 p = self._repo.dirstate.parents()
1506 p = self._repo.dirstate.parents()
1501 if p[1] == nullid:
1507 if p[1] == nullid:
1502 p = p[:-1]
1508 p = p[:-1]
1503 return [changectx(self._repo, x) for x in p]
1509 return [changectx(self._repo, x) for x in p]
1504
1510
1505 def filectx(self, path, filelog=None):
1511 def filectx(self, path, filelog=None):
1506 """get a file context from the working directory"""
1512 """get a file context from the working directory"""
1507 return workingfilectx(self._repo, path, workingctx=self,
1513 return workingfilectx(self._repo, path, workingctx=self,
1508 filelog=filelog)
1514 filelog=filelog)
1509
1515
    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty():
                return True
        # check current working dir
        # NOTE: this may return a truthy non-boolean (e.g. the p2 changectx
        # or a non-empty status list) rather than True; callers must treat
        # the result as a plain truth value
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))
1521
1527
    def add(self, list, prefix=""):
        """Schedule the given paths for addition to the dirstate.

        ``list`` is the paths to add (the name shadows the builtin but is
        part of the historical interface); ``prefix`` is prepended only for
        user-facing messages and portability checks. Returns the sublist of
        paths that could not be added.
        """
        join = lambda f: os.path.join(prefix, f)
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # warn about names that are unportable across platforms
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                # large files are only warned about, not rejected
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    # already added, merged or normal: nothing to do
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    # previously removed: resurrect instead of re-adding
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1553
1559
1554 def forget(self, files, prefix=""):
1560 def forget(self, files, prefix=""):
1555 join = lambda f: os.path.join(prefix, f)
1561 join = lambda f: os.path.join(prefix, f)
1556 with self._repo.wlock():
1562 with self._repo.wlock():
1557 rejected = []
1563 rejected = []
1558 for f in files:
1564 for f in files:
1559 if f not in self._repo.dirstate:
1565 if f not in self._repo.dirstate:
1560 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1566 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1561 rejected.append(f)
1567 rejected.append(f)
1562 elif self._repo.dirstate[f] != 'a':
1568 elif self._repo.dirstate[f] != 'a':
1563 self._repo.dirstate.remove(f)
1569 self._repo.dirstate.remove(f)
1564 else:
1570 else:
1565 self._repo.dirstate.drop(f)
1571 self._repo.dirstate.drop(f)
1566 return rejected
1572 return rejected
1567
1573
1568 def undelete(self, list):
1574 def undelete(self, list):
1569 pctxs = self.parents()
1575 pctxs = self.parents()
1570 with self._repo.wlock():
1576 with self._repo.wlock():
1571 for f in list:
1577 for f in list:
1572 if self._repo.dirstate[f] != 'r':
1578 if self._repo.dirstate[f] != 'r':
1573 self._repo.ui.warn(_("%s not removed!\n") % f)
1579 self._repo.ui.warn(_("%s not removed!\n") % f)
1574 else:
1580 else:
1575 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1581 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1576 t = fctx.data()
1582 t = fctx.data()
1577 self._repo.wwrite(f, t, fctx.flags())
1583 self._repo.wwrite(f, t, fctx.flags())
1578 self._repo.dirstate.normal(f)
1584 self._repo.dirstate.normal(f)
1579
1585
1580 def copy(self, source, dest):
1586 def copy(self, source, dest):
1581 try:
1587 try:
1582 st = self._repo.wvfs.lstat(dest)
1588 st = self._repo.wvfs.lstat(dest)
1583 except OSError as err:
1589 except OSError as err:
1584 if err.errno != errno.ENOENT:
1590 if err.errno != errno.ENOENT:
1585 raise
1591 raise
1586 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1592 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1587 return
1593 return
1588 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1594 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1589 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1595 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1590 "symbolic link\n") % dest)
1596 "symbolic link\n") % dest)
1591 else:
1597 else:
1592 with self._repo.wlock():
1598 with self._repo.wlock():
1593 if self._repo.dirstate[dest] in '?':
1599 if self._repo.dirstate[dest] in '?':
1594 self._repo.dirstate.add(dest)
1600 self._repo.dirstate.add(dest)
1595 elif self._repo.dirstate[dest] in 'r':
1601 elif self._repo.dirstate[dest] in 'r':
1596 self._repo.dirstate.normallookup(dest)
1602 self._repo.dirstate.normallookup(dest)
1597 self._repo.dirstate.copy(source, dest)
1603 self._repo.dirstate.copy(source, dest)
1598
1604
1599 def match(self, pats=None, include=None, exclude=None, default='glob',
1605 def match(self, pats=None, include=None, exclude=None, default='glob',
1600 listsubrepos=False, badfn=None):
1606 listsubrepos=False, badfn=None):
1601 r = self._repo
1607 r = self._repo
1602
1608
1603 # Only a case insensitive filesystem needs magic to translate user input
1609 # Only a case insensitive filesystem needs magic to translate user input
1604 # to actual case in the filesystem.
1610 # to actual case in the filesystem.
1605 icasefs = not util.fscasesensitive(r.root)
1611 icasefs = not util.fscasesensitive(r.root)
1606 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1612 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1607 default, auditor=r.auditor, ctx=self,
1613 default, auditor=r.auditor, ctx=self,
1608 listsubrepos=listsubrepos, badfn=badfn,
1614 listsubrepos=listsubrepos, badfn=badfn,
1609 icasefs=icasefs)
1615 icasefs=icasefs)
1610
1616
1611 def _filtersuspectsymlink(self, files):
1617 def _filtersuspectsymlink(self, files):
1612 if not files or self._repo.dirstate._checklink:
1618 if not files or self._repo.dirstate._checklink:
1613 return files
1619 return files
1614
1620
1615 # Symlink placeholders may get non-symlink-like contents
1621 # Symlink placeholders may get non-symlink-like contents
1616 # via user error or dereferencing by NFS or Samba servers,
1622 # via user error or dereferencing by NFS or Samba servers,
1617 # so we filter out any placeholders that don't look like a
1623 # so we filter out any placeholders that don't look like a
1618 # symlink
1624 # symlink
1619 sane = []
1625 sane = []
1620 for f in files:
1626 for f in files:
1621 if self.flags(f) == 'l':
1627 if self.flags(f) == 'l':
1622 d = self[f].data()
1628 d = self[f].data()
1623 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1629 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1624 self._repo.ui.debug('ignoring suspect symlink placeholder'
1630 self._repo.ui.debug('ignoring suspect symlink placeholder'
1625 ' "%s"\n' % f)
1631 ' "%s"\n' % f)
1626 continue
1632 continue
1627 sane.append(f)
1633 sane.append(f)
1628 return sane
1634 return sane
1629
1635
1630 def _checklookup(self, files):
1636 def _checklookup(self, files):
1631 # check for any possibly clean files
1637 # check for any possibly clean files
1632 if not files:
1638 if not files:
1633 return [], [], []
1639 return [], [], []
1634
1640
1635 modified = []
1641 modified = []
1636 deleted = []
1642 deleted = []
1637 fixup = []
1643 fixup = []
1638 pctx = self._parents[0]
1644 pctx = self._parents[0]
1639 # do a full compare of any files that might have changed
1645 # do a full compare of any files that might have changed
1640 for f in sorted(files):
1646 for f in sorted(files):
1641 try:
1647 try:
1642 # This will return True for a file that got replaced by a
1648 # This will return True for a file that got replaced by a
1643 # directory in the interim, but fixing that is pretty hard.
1649 # directory in the interim, but fixing that is pretty hard.
1644 if (f not in pctx or self.flags(f) != pctx.flags(f)
1650 if (f not in pctx or self.flags(f) != pctx.flags(f)
1645 or pctx[f].cmp(self[f])):
1651 or pctx[f].cmp(self[f])):
1646 modified.append(f)
1652 modified.append(f)
1647 else:
1653 else:
1648 fixup.append(f)
1654 fixup.append(f)
1649 except (IOError, OSError):
1655 except (IOError, OSError):
1650 # A file become inaccessible in between? Mark it as deleted,
1656 # A file become inaccessible in between? Mark it as deleted,
1651 # matching dirstate behavior (issue5584).
1657 # matching dirstate behavior (issue5584).
1652 # The dirstate has more complex behavior around whether a
1658 # The dirstate has more complex behavior around whether a
1653 # missing file matches a directory, etc, but we don't need to
1659 # missing file matches a directory, etc, but we don't need to
1654 # bother with that: if f has made it to this point, we're sure
1660 # bother with that: if f has made it to this point, we're sure
1655 # it's in the dirstate.
1661 # it's in the dirstate.
1656 deleted.append(f)
1662 deleted.append(f)
1657
1663
1658 return modified, deleted, fixup
1664 return modified, deleted, fixup
1659
1665
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean

        ``fixup`` is the list of files a content compare found clean;
        their dirstate entries are refreshed so later status calls take
        the fast path. Registered post-dirstate-status callbacks are run
        under the same wlock. The whole operation is best-effort: it is
        skipped if the wlock cannot be taken without waiting.
        """
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                # lock unavailable: silently skip the optional update
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1699
1705
1700 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1706 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1701 unknown=False):
1707 unknown=False):
1702 '''Gets the status from the dirstate -- internal use only.'''
1708 '''Gets the status from the dirstate -- internal use only.'''
1703 listignored, listclean, listunknown = ignored, clean, unknown
1709 listignored, listclean, listunknown = ignored, clean, unknown
1704 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1710 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1705 subrepos = []
1711 subrepos = []
1706 if '.hgsub' in self:
1712 if '.hgsub' in self:
1707 subrepos = sorted(self.substate)
1713 subrepos = sorted(self.substate)
1708 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1714 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1709 listclean, listunknown)
1715 listclean, listunknown)
1710
1716
1711 # check for any possibly clean files
1717 # check for any possibly clean files
1712 fixup = []
1718 fixup = []
1713 if cmp:
1719 if cmp:
1714 modified2, deleted2, fixup = self._checklookup(cmp)
1720 modified2, deleted2, fixup = self._checklookup(cmp)
1715 s.modified.extend(modified2)
1721 s.modified.extend(modified2)
1716 s.deleted.extend(deleted2)
1722 s.deleted.extend(deleted2)
1717
1723
1718 if fixup and listclean:
1724 if fixup and listclean:
1719 s.clean.extend(fixup)
1725 s.clean.extend(fixup)
1720
1726
1721 self._poststatusfixup(s, fixup)
1727 self._poststatusfixup(s, fixup)
1722
1728
1723 if match.always():
1729 if match.always():
1724 # cache for performance
1730 # cache for performance
1725 if s.unknown or s.ignored or s.clean:
1731 if s.unknown or s.ignored or s.clean:
1726 # "_status" is cached with list*=False in the normal route
1732 # "_status" is cached with list*=False in the normal route
1727 self._status = scmutil.status(s.modified, s.added, s.removed,
1733 self._status = scmutil.status(s.modified, s.added, s.removed,
1728 s.deleted, [], [], [])
1734 s.deleted, [], [], [])
1729 else:
1735 else:
1730 self._status = s
1736 self._status = s
1731
1737
1732 return s
1738 return s
1733
1739
1734 @propertycache
1740 @propertycache
1735 def _manifest(self):
1741 def _manifest(self):
1736 """generate a manifest corresponding to the values in self._status
1742 """generate a manifest corresponding to the values in self._status
1737
1743
1738 This reuse the file nodeid from parent, but we use special node
1744 This reuse the file nodeid from parent, but we use special node
1739 identifiers for added and modified files. This is used by manifests
1745 identifiers for added and modified files. This is used by manifests
1740 merge to see that files are different and by update logic to avoid
1746 merge to see that files are different and by update logic to avoid
1741 deleting newly added files.
1747 deleting newly added files.
1742 """
1748 """
1743 return self._buildstatusmanifest(self._status)
1749 return self._buildstatusmanifest(self._status)
1744
1750
1745 def _buildstatusmanifest(self, status):
1751 def _buildstatusmanifest(self, status):
1746 """Builds a manifest that includes the given status results."""
1752 """Builds a manifest that includes the given status results."""
1747 parents = self.parents()
1753 parents = self.parents()
1748
1754
1749 man = parents[0].manifest().copy()
1755 man = parents[0].manifest().copy()
1750
1756
1751 ff = self._flagfunc
1757 ff = self._flagfunc
1752 for i, l in ((addednodeid, status.added),
1758 for i, l in ((addednodeid, status.added),
1753 (modifiednodeid, status.modified)):
1759 (modifiednodeid, status.modified)):
1754 for f in l:
1760 for f in l:
1755 man[f] = i
1761 man[f] = i
1756 try:
1762 try:
1757 man.setflag(f, ff(f))
1763 man.setflag(f, ff(f))
1758 except OSError:
1764 except OSError:
1759 pass
1765 pass
1760
1766
1761 for f in status.deleted + status.removed:
1767 for f in status.deleted + status.removed:
1762 if f in man:
1768 if f in man:
1763 del man[f]
1769 del man[f]
1764
1770
1765 return man
1771 return man
1766
1772
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # NOTE(review): the incoming ``s`` is immediately replaced by a
        # fresh dirstate status below; the parameter seems to exist only
        # to match the superclass signature -- confirm before relying on
        # the passed-in value.
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            # not comparing against the working directory's parent: fall
            # back to the generic manifest-based computation.
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s
1786
1792
1787 def _matchstatus(self, other, match):
1793 def _matchstatus(self, other, match):
1788 """override the match method with a filter for directory patterns
1794 """override the match method with a filter for directory patterns
1789
1795
1790 We use inheritance to customize the match.bad method only in cases of
1796 We use inheritance to customize the match.bad method only in cases of
1791 workingctx since it belongs only to the working directory when
1797 workingctx since it belongs only to the working directory when
1792 comparing against the parent changeset.
1798 comparing against the parent changeset.
1793
1799
1794 If we aren't comparing against the working directory's parent, then we
1800 If we aren't comparing against the working directory's parent, then we
1795 just use the default match object sent to us.
1801 just use the default match object sent to us.
1796 """
1802 """
1797 superself = super(workingctx, self)
1803 superself = super(workingctx, self)
1798 match = superself._matchstatus(other, match)
1804 match = superself._matchstatus(other, match)
1799 if other != self._repo['.']:
1805 if other != self._repo['.']:
1800 def bad(f, msg):
1806 def bad(f, msg):
1801 # 'f' may be a directory pattern from 'match.files()',
1807 # 'f' may be a directory pattern from 'match.files()',
1802 # so 'f not in ctx1' is not enough
1808 # so 'f not in ctx1' is not enough
1803 if f not in other and not other.hasdir(f):
1809 if f not in other and not other.hasdir(f):
1804 self._repo.ui.warn('%s: %s\n' %
1810 self._repo.ui.warn('%s: %s\n' %
1805 (self._repo.dirstate.pathto(f), msg))
1811 (self._repo.dirstate.pathto(f), msg))
1806 match.bad = bad
1812 match.bad = bad
1807 return match
1813 return match
1808
1814
class committablefilectx(basefilectx):
    """Shared functionality for file contexts with the ability to commit,
    e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None
        # only pin the filelog/changectx when explicitly provided
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def manifestnode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        filelog = self._filelog
        parentctxs = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # a rename carries its own (path, node); filelog is resolved
            # lazily by the parent filectx
            entries = [renamed + (None,)]
        else:
            entries = [(path, manifestnode(parentctxs[0], path), filelog)]
        for pctx in parentctxs[1:]:
            entries.append((path, manifestnode(pctx, path), filelog))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in entries if n != nullid]

    def children(self):
        # nothing has been committed on top of this context yet
        return []
1855
1861
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        """read the file's current content from the working directory"""
        return self._repo.wread(self._path)

    def renamed(self):
        """return (source, sourcenode) if this file was copied, else None"""
        source = self._repo.dirstate.copied(self._path)
        if not source:
            return None
        p1manifest = self._changectx._parents[0]._manifest
        return source, p1manifest.get(source, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # file is gone: fall back to the changectx timestamp
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)
1901
1907
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # NOTE: super(workingctx, self) deliberately skips workingctx in
        # the MRO so the explicit ``changes`` status is passed through
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            changed = self._changedset
            clean = [f for f in self._manifest if f not in changed]
        else:
            clean = []
        modified = [f for f in self._status.modified if match(f)]
        added = [f for f in self._status.added if match(f)]
        removed = [f for f in self._status.removed if match(f)]
        return scmutil.status(modified, added, removed, [], [], [], clean)

    @propertycache
    def _changedset(self):
        """The set of files changed in this context."""
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
1939
1945
def makecachingfilectxfn(func):
    """Wrap ``func`` in a filectxfn that memoizes per path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        try:
            return cache[path]
        except KeyError:
            fctx = func(repo, memctx, path)
            cache[path] = fctx
            return fctx

    return getfilectx
1955
1961
def memfilefromctx(ctx):
    """Return a filectxfn serving memfilectx objects out of ``ctx``.

    This is a convenience helper for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        rename = fctx.renamed()
        if rename:
            rename = rename[0]
        return memfilectx(repo, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=rename, memctx=memctx)

    return getfilectx
1974
1980
def memfilefrompatch(patchstore):
    """Return a filectxfn backed by a patch (e.g. patchstore object).

    This is a convenience helper for building a memctx based on a
    patchstore.
    """
    def getfilectx(repo, memctx, path):
        contents, mode, copysource = patchstore.getfile(path)
        if contents is None:
            # the patch removed this file
            return None
        islink, isexec = mode
        return memfilectx(repo, path, contents,
                          islink=islink, isexec=isexec,
                          copied=copysource, memctx=memctx)

    return getfilectx
1990
1996
1991 class memctx(committablectx):
1997 class memctx(committablectx):
1992 """Use memctx to perform in-memory commits via localrepo.commitctx().
1998 """Use memctx to perform in-memory commits via localrepo.commitctx().
1993
1999
1994 Revision information is supplied at initialization time while
2000 Revision information is supplied at initialization time while
1995 related files data and is made available through a callback
2001 related files data and is made available through a callback
1996 mechanism. 'repo' is the current localrepo, 'parents' is a
2002 mechanism. 'repo' is the current localrepo, 'parents' is a
1997 sequence of two parent revisions identifiers (pass None for every
2003 sequence of two parent revisions identifiers (pass None for every
1998 missing parent), 'text' is the commit message and 'files' lists
2004 missing parent), 'text' is the commit message and 'files' lists
1999 names of files touched by the revision (normalized and relative to
2005 names of files touched by the revision (normalized and relative to
2000 repository root).
2006 repository root).
2001
2007
2002 filectxfn(repo, memctx, path) is a callable receiving the
2008 filectxfn(repo, memctx, path) is a callable receiving the
2003 repository, the current memctx object and the normalized path of
2009 repository, the current memctx object and the normalized path of
2004 requested file, relative to repository root. It is fired by the
2010 requested file, relative to repository root. It is fired by the
2005 commit function for every file in 'files', but calls order is
2011 commit function for every file in 'files', but calls order is
2006 undefined. If the file is available in the revision being
2012 undefined. If the file is available in the revision being
2007 committed (updated or added), filectxfn returns a memfilectx
2013 committed (updated or added), filectxfn returns a memfilectx
2008 object. If the file was removed, filectxfn return None for recent
2014 object. If the file was removed, filectxfn return None for recent
2009 Mercurial. Moved files are represented by marking the source file
2015 Mercurial. Moved files are represented by marking the source file
2010 removed and the new file added with copy information (see
2016 removed and the new file added with copy information (see
2011 memfilectx).
2017 memfilectx).
2012
2018
2013 user receives the committer name and defaults to current
2019 user receives the committer name and defaults to current
2014 repository username, date is the commit date in any format
2020 repository username, date is the commit date in any format
2015 supported by util.parsedate() and defaults to current date, extra
2021 supported by util.parsedate() and defaults to current date, extra
2016 is a dictionary of metadata or is left empty.
2022 is a dictionary of metadata or is left empty.
2017 """
2023 """
2018
2024
    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True
2023
2029
2024 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2030 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2025 date=None, extra=None, branch=None, editor=False):
2031 date=None, extra=None, branch=None, editor=False):
2026 super(memctx, self).__init__(repo, text, user, date, extra)
2032 super(memctx, self).__init__(repo, text, user, date, extra)
2027 self._rev = None
2033 self._rev = None
2028 self._node = None
2034 self._node = None
2029 parents = [(p or nullid) for p in parents]
2035 parents = [(p or nullid) for p in parents]
2030 p1, p2 = parents
2036 p1, p2 = parents
2031 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
2037 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
2032 files = sorted(set(files))
2038 files = sorted(set(files))
2033 self._files = files
2039 self._files = files
2034 if branch is not None:
2040 if branch is not None:
2035 self._extra['branch'] = encoding.fromlocal(branch)
2041 self._extra['branch'] = encoding.fromlocal(branch)
2036 self.substate = {}
2042 self.substate = {}
2037
2043
2038 if isinstance(filectxfn, patch.filestore):
2044 if isinstance(filectxfn, patch.filestore):
2039 filectxfn = memfilefrompatch(filectxfn)
2045 filectxfn = memfilefrompatch(filectxfn)
2040 elif not callable(filectxfn):
2046 elif not callable(filectxfn):
2041 # if store is not callable, wrap it in a function
2047 # if store is not callable, wrap it in a function
2042 filectxfn = memfilefromctx(filectxfn)
2048 filectxfn = memfilefromctx(filectxfn)
2043
2049
2044 # memoizing increases performance for e.g. vcs convert scenarios.
2050 # memoizing increases performance for e.g. vcs convert scenarios.
2045 self._filectxfn = makecachingfilectxfn(filectxfn)
2051 self._filectxfn = makecachingfilectxfn(filectxfn)
2046
2052
2047 if editor:
2053 if editor:
2048 self._text = editor(self._repo, self, [])
2054 self._text = editor(self._repo, self, [])
2049 self._repo.savecommitmessage(self._text)
2055 self._repo.savecommitmessage(self._text)
2050
2056
2051 def filectx(self, path, filelog=None):
2057 def filectx(self, path, filelog=None):
2052 """get a file context from the working directory
2058 """get a file context from the working directory
2053
2059
2054 Returns None if file doesn't exist and should be removed."""
2060 Returns None if file doesn't exist and should be removed."""
2055 return self._filectxfn(self._repo, self, path)
2061 return self._filectxfn(self._repo, self, path)
2056
2062
2057 def commit(self):
2063 def commit(self):
2058 """commit context to the repo"""
2064 """commit context to the repo"""
2059 return self._repo.commitctx(self)
2065 return self._repo.commitctx(self)
2060
2066
2061 @propertycache
2067 @propertycache
2062 def _manifest(self):
2068 def _manifest(self):
2063 """generate a manifest based on the return values of filectxfn"""
2069 """generate a manifest based on the return values of filectxfn"""
2064
2070
2065 # keep this simple for now; just worry about p1
2071 # keep this simple for now; just worry about p1
2066 pctx = self._parents[0]
2072 pctx = self._parents[0]
2067 man = pctx.manifest().copy()
2073 man = pctx.manifest().copy()
2068
2074
2069 for f in self._status.modified:
2075 for f in self._status.modified:
2070 p1node = nullid
2076 p1node = nullid
2071 p2node = nullid
2077 p2node = nullid
2072 p = pctx[f].parents() # if file isn't in pctx, check p2?
2078 p = pctx[f].parents() # if file isn't in pctx, check p2?
2073 if len(p) > 0:
2079 if len(p) > 0:
2074 p1node = p[0].filenode()
2080 p1node = p[0].filenode()
2075 if len(p) > 1:
2081 if len(p) > 1:
2076 p2node = p[1].filenode()
2082 p2node = p[1].filenode()
2077 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2083 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2078
2084
2079 for f in self._status.added:
2085 for f in self._status.added:
2080 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2086 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2081
2087
2082 for f in self._status.removed:
2088 for f in self._status.removed:
2083 if f in man:
2089 if f in man:
2084 del man[f]
2090 del man[f]
2085
2091
2086 return man
2092 return man
2087
2093
2088 @propertycache
2094 @propertycache
2089 def _status(self):
2095 def _status(self):
2090 """Calculate exact status from ``files`` specified at construction
2096 """Calculate exact status from ``files`` specified at construction
2091 """
2097 """
2092 man1 = self.p1().manifest()
2098 man1 = self.p1().manifest()
2093 p2 = self._parents[1]
2099 p2 = self._parents[1]
2094 # "1 < len(self._parents)" can't be used for checking
2100 # "1 < len(self._parents)" can't be used for checking
2095 # existence of the 2nd parent, because "memctx._parents" is
2101 # existence of the 2nd parent, because "memctx._parents" is
2096 # explicitly initialized by the list, of which length is 2.
2102 # explicitly initialized by the list, of which length is 2.
2097 if p2.node() != nullid:
2103 if p2.node() != nullid:
2098 man2 = p2.manifest()
2104 man2 = p2.manifest()
2099 managing = lambda f: f in man1 or f in man2
2105 managing = lambda f: f in man1 or f in man2
2100 else:
2106 else:
2101 managing = lambda f: f in man1
2107 managing = lambda f: f in man1
2102
2108
2103 modified, added, removed = [], [], []
2109 modified, added, removed = [], [], []
2104 for f in self._files:
2110 for f in self._files:
2105 if not managing(f):
2111 if not managing(f):
2106 added.append(f)
2112 added.append(f)
2107 elif self[f]:
2113 elif self[f]:
2108 modified.append(f)
2114 modified.append(f)
2109 else:
2115 else:
2110 removed.append(f)
2116 removed.append(f)
2111
2117
2112 return scmutil.status(modified, added, removed, [], [], [], [])
2118 return scmutil.status(modified, added, removed, [], [], [], [])
2113
2119
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        # encode mode bits the same way manifests do: 'l' and/or 'x'
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        if copied:
            self._copied = (copied, nullid)
        else:
            self._copied = None

    def data(self):
        """Return the in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """
        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            def ctxmatch():
                return True
        else:
            def ctxmatch():
                return ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            def copiedmatch():
                return True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            def copiedmatch():
                return copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            for attr in ('rawdata', 'rawflags', '_filenode', '_filerev'):
                if util.safehasattr(originalfctx, attr):
                    setattr(self, attr, getattr(originalfctx, attr))

    def data(self):
        """Return the (possibly overridden, lazily computed) file content."""
        return self._datafunc()
2217
2223
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents, text, user=None, date=None,
                 extra=None, editor=False):
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        parents = [(p or nullid) for p in parents]
        p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents.  Comparing the changectx itself to
        # nullid would always be "unequal" (basectx equality is type-based),
        # so test the node explicitly, as _status does with p2.node().
        mp1, mp2 = self.manifestctx().parents
        if p1.node() != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2.node() != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        """Return the node of the reused manifest."""
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        """Delegate file lookups to the original revision."""
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
General Comments 0
You need to be logged in to leave comments. Login now