##// END OF EJS Templates
filectx: fix annotate to not directly instantiate filectx...
Durham Goode -
r23770:50f0096a default
parent child Browse files
Show More
@@ -1,1859 +1,1858
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, short, hex, bin
8 from node import nullid, nullrev, short, hex, bin
9 from i18n import _
9 from i18n import _
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 import match as matchmod
11 import match as matchmod
12 import os, errno, stat
12 import os, errno, stat
13 import obsolete as obsmod
13 import obsolete as obsmod
14 import repoview
14 import repoview
15 import fileset
15 import fileset
16 import revlog
16 import revlog
17
17
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19
19
20 # Phony node value to stand-in for new files in some uses of
20 # Phony node value to stand-in for new files in some uses of
21 # manifests. Manifests support 21-byte hashes for nodes which are
21 # manifests. Manifests support 21-byte hashes for nodes which are
22 # dirty in the working copy.
22 # dirty in the working copy.
23 _newnode = '!' * 21
23 _newnode = '!' * 21
24
24
25 def _adjustlinkrev(repo, path, filelog, fnode, srcrev, inclusive=False):
25 def _adjustlinkrev(repo, path, filelog, fnode, srcrev, inclusive=False):
26 """return the first ancestor of <srcrev> introducting <fnode>
26 """return the first ancestor of <srcrev> introducting <fnode>
27
27
28 If the linkrev of the file revision does not point to an ancestor of
28 If the linkrev of the file revision does not point to an ancestor of
29 srcrev, we'll walk down the ancestors until we find one introducing this
29 srcrev, we'll walk down the ancestors until we find one introducing this
30 file revision.
30 file revision.
31
31
32 :repo: a localrepository object (used to access changelog and manifest)
32 :repo: a localrepository object (used to access changelog and manifest)
33 :path: the file path
33 :path: the file path
34 :fnode: the nodeid of the file revision
34 :fnode: the nodeid of the file revision
35 :filelog: the filelog of this path
35 :filelog: the filelog of this path
36 :srcrev: the changeset revision we search ancestors from
36 :srcrev: the changeset revision we search ancestors from
37 :inclusive: if true, the src revision will also be checked
37 :inclusive: if true, the src revision will also be checked
38 """
38 """
39 cl = repo.unfiltered().changelog
39 cl = repo.unfiltered().changelog
40 ma = repo.manifest
40 ma = repo.manifest
41 # fetch the linkrev
41 # fetch the linkrev
42 fr = filelog.rev(fnode)
42 fr = filelog.rev(fnode)
43 lkr = filelog.linkrev(fr)
43 lkr = filelog.linkrev(fr)
44 # check if this linkrev is an ancestor of srcrev
44 # check if this linkrev is an ancestor of srcrev
45 anc = cl.ancestors([srcrev], lkr, inclusive=inclusive)
45 anc = cl.ancestors([srcrev], lkr, inclusive=inclusive)
46 if lkr not in anc:
46 if lkr not in anc:
47 for a in anc:
47 for a in anc:
48 ac = cl.read(a) # get changeset data (we avoid object creation).
48 ac = cl.read(a) # get changeset data (we avoid object creation).
49 if path in ac[3]: # checking the 'files' field.
49 if path in ac[3]: # checking the 'files' field.
50 # The file has been touched, check if the content is similar
50 # The file has been touched, check if the content is similar
51 # to the one we search for.
51 # to the one we search for.
52 if fnode == ma.readdelta(ac[0]).get(path):
52 if fnode == ma.readdelta(ac[0]).get(path):
53 return a
53 return a
54 # In theory, we should never get out of that loop without a result. But
54 # In theory, we should never get out of that loop without a result. But
55 # if manifest uses a buggy file revision (not children of the one it
55 # if manifest uses a buggy file revision (not children of the one it
56 # replaces) we could. Such a buggy situation will likely result is crash
56 # replaces) we could. Such a buggy situation will likely result is crash
57 # somewhere else at to some point.
57 # somewhere else at to some point.
58 return lkr
58 return lkr
59
59
60 class basectx(object):
60 class basectx(object):
61 """A basectx object represents the common logic for its children:
61 """A basectx object represents the common logic for its children:
62 changectx: read-only context that is already present in the repo,
62 changectx: read-only context that is already present in the repo,
63 workingctx: a context that represents the working directory and can
63 workingctx: a context that represents the working directory and can
64 be committed,
64 be committed,
65 memctx: a context that represents changes in-memory and can also
65 memctx: a context that represents changes in-memory and can also
66 be committed."""
66 be committed."""
67 def __new__(cls, repo, changeid='', *args, **kwargs):
67 def __new__(cls, repo, changeid='', *args, **kwargs):
68 if isinstance(changeid, basectx):
68 if isinstance(changeid, basectx):
69 return changeid
69 return changeid
70
70
71 o = super(basectx, cls).__new__(cls)
71 o = super(basectx, cls).__new__(cls)
72
72
73 o._repo = repo
73 o._repo = repo
74 o._rev = nullrev
74 o._rev = nullrev
75 o._node = nullid
75 o._node = nullid
76
76
77 return o
77 return o
78
78
79 def __str__(self):
79 def __str__(self):
80 return short(self.node())
80 return short(self.node())
81
81
82 def __int__(self):
82 def __int__(self):
83 return self.rev()
83 return self.rev()
84
84
85 def __repr__(self):
85 def __repr__(self):
86 return "<%s %s>" % (type(self).__name__, str(self))
86 return "<%s %s>" % (type(self).__name__, str(self))
87
87
88 def __eq__(self, other):
88 def __eq__(self, other):
89 try:
89 try:
90 return type(self) == type(other) and self._rev == other._rev
90 return type(self) == type(other) and self._rev == other._rev
91 except AttributeError:
91 except AttributeError:
92 return False
92 return False
93
93
94 def __ne__(self, other):
94 def __ne__(self, other):
95 return not (self == other)
95 return not (self == other)
96
96
97 def __contains__(self, key):
97 def __contains__(self, key):
98 return key in self._manifest
98 return key in self._manifest
99
99
100 def __getitem__(self, key):
100 def __getitem__(self, key):
101 return self.filectx(key)
101 return self.filectx(key)
102
102
103 def __iter__(self):
103 def __iter__(self):
104 for f in sorted(self._manifest):
104 for f in sorted(self._manifest):
105 yield f
105 yield f
106
106
107 def _manifestmatches(self, match, s):
107 def _manifestmatches(self, match, s):
108 """generate a new manifest filtered by the match argument
108 """generate a new manifest filtered by the match argument
109
109
110 This method is for internal use only and mainly exists to provide an
110 This method is for internal use only and mainly exists to provide an
111 object oriented way for other contexts to customize the manifest
111 object oriented way for other contexts to customize the manifest
112 generation.
112 generation.
113 """
113 """
114 return self.manifest().matches(match)
114 return self.manifest().matches(match)
115
115
116 def _matchstatus(self, other, match):
116 def _matchstatus(self, other, match):
117 """return match.always if match is none
117 """return match.always if match is none
118
118
119 This internal method provides a way for child objects to override the
119 This internal method provides a way for child objects to override the
120 match operator.
120 match operator.
121 """
121 """
122 return match or matchmod.always(self._repo.root, self._repo.getcwd())
122 return match or matchmod.always(self._repo.root, self._repo.getcwd())
123
123
124 def _buildstatus(self, other, s, match, listignored, listclean,
124 def _buildstatus(self, other, s, match, listignored, listclean,
125 listunknown):
125 listunknown):
126 """build a status with respect to another context"""
126 """build a status with respect to another context"""
127 # Load earliest manifest first for caching reasons. More specifically,
127 # Load earliest manifest first for caching reasons. More specifically,
128 # if you have revisions 1000 and 1001, 1001 is probably stored as a
128 # if you have revisions 1000 and 1001, 1001 is probably stored as a
129 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
129 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
130 # 1000 and cache it so that when you read 1001, we just need to apply a
130 # 1000 and cache it so that when you read 1001, we just need to apply a
131 # delta to what's in the cache. So that's one full reconstruction + one
131 # delta to what's in the cache. So that's one full reconstruction + one
132 # delta application.
132 # delta application.
133 if self.rev() is not None and self.rev() < other.rev():
133 if self.rev() is not None and self.rev() < other.rev():
134 self.manifest()
134 self.manifest()
135 mf1 = other._manifestmatches(match, s)
135 mf1 = other._manifestmatches(match, s)
136 mf2 = self._manifestmatches(match, s)
136 mf2 = self._manifestmatches(match, s)
137
137
138 modified, added = [], []
138 modified, added = [], []
139 removed = []
139 removed = []
140 clean = []
140 clean = []
141 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
141 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
142 deletedset = set(deleted)
142 deletedset = set(deleted)
143 d = mf1.diff(mf2, clean=listclean)
143 d = mf1.diff(mf2, clean=listclean)
144 for fn, value in d.iteritems():
144 for fn, value in d.iteritems():
145 if fn in deletedset:
145 if fn in deletedset:
146 continue
146 continue
147 if value is None:
147 if value is None:
148 clean.append(fn)
148 clean.append(fn)
149 continue
149 continue
150 (node1, flag1), (node2, flag2) = value
150 (node1, flag1), (node2, flag2) = value
151 if node1 is None:
151 if node1 is None:
152 added.append(fn)
152 added.append(fn)
153 elif node2 is None:
153 elif node2 is None:
154 removed.append(fn)
154 removed.append(fn)
155 elif node2 != _newnode:
155 elif node2 != _newnode:
156 # The file was not a new file in mf2, so an entry
156 # The file was not a new file in mf2, so an entry
157 # from diff is really a difference.
157 # from diff is really a difference.
158 modified.append(fn)
158 modified.append(fn)
159 elif self[fn].cmp(other[fn]):
159 elif self[fn].cmp(other[fn]):
160 # node2 was newnode, but the working file doesn't
160 # node2 was newnode, but the working file doesn't
161 # match the one in mf1.
161 # match the one in mf1.
162 modified.append(fn)
162 modified.append(fn)
163 else:
163 else:
164 clean.append(fn)
164 clean.append(fn)
165
165
166 if removed:
166 if removed:
167 # need to filter files if they are already reported as removed
167 # need to filter files if they are already reported as removed
168 unknown = [fn for fn in unknown if fn not in mf1]
168 unknown = [fn for fn in unknown if fn not in mf1]
169 ignored = [fn for fn in ignored if fn not in mf1]
169 ignored = [fn for fn in ignored if fn not in mf1]
170 # if they're deleted, don't report them as removed
170 # if they're deleted, don't report them as removed
171 removed = [fn for fn in removed if fn not in deletedset]
171 removed = [fn for fn in removed if fn not in deletedset]
172
172
173 return scmutil.status(modified, added, removed, deleted, unknown,
173 return scmutil.status(modified, added, removed, deleted, unknown,
174 ignored, clean)
174 ignored, clean)
175
175
176 @propertycache
176 @propertycache
177 def substate(self):
177 def substate(self):
178 return subrepo.state(self, self._repo.ui)
178 return subrepo.state(self, self._repo.ui)
179
179
180 def subrev(self, subpath):
180 def subrev(self, subpath):
181 return self.substate[subpath][1]
181 return self.substate[subpath][1]
182
182
183 def rev(self):
183 def rev(self):
184 return self._rev
184 return self._rev
185 def node(self):
185 def node(self):
186 return self._node
186 return self._node
187 def hex(self):
187 def hex(self):
188 return hex(self.node())
188 return hex(self.node())
189 def manifest(self):
189 def manifest(self):
190 return self._manifest
190 return self._manifest
191 def phasestr(self):
191 def phasestr(self):
192 return phases.phasenames[self.phase()]
192 return phases.phasenames[self.phase()]
193 def mutable(self):
193 def mutable(self):
194 return self.phase() > phases.public
194 return self.phase() > phases.public
195
195
196 def getfileset(self, expr):
196 def getfileset(self, expr):
197 return fileset.getfileset(self, expr)
197 return fileset.getfileset(self, expr)
198
198
199 def obsolete(self):
199 def obsolete(self):
200 """True if the changeset is obsolete"""
200 """True if the changeset is obsolete"""
201 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
201 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
202
202
203 def extinct(self):
203 def extinct(self):
204 """True if the changeset is extinct"""
204 """True if the changeset is extinct"""
205 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
205 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
206
206
207 def unstable(self):
207 def unstable(self):
208 """True if the changeset is not obsolete but it's ancestor are"""
208 """True if the changeset is not obsolete but it's ancestor are"""
209 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
209 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
210
210
211 def bumped(self):
211 def bumped(self):
212 """True if the changeset try to be a successor of a public changeset
212 """True if the changeset try to be a successor of a public changeset
213
213
214 Only non-public and non-obsolete changesets may be bumped.
214 Only non-public and non-obsolete changesets may be bumped.
215 """
215 """
216 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
216 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
217
217
218 def divergent(self):
218 def divergent(self):
219 """Is a successors of a changeset with multiple possible successors set
219 """Is a successors of a changeset with multiple possible successors set
220
220
221 Only non-public and non-obsolete changesets may be divergent.
221 Only non-public and non-obsolete changesets may be divergent.
222 """
222 """
223 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
223 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
224
224
225 def troubled(self):
225 def troubled(self):
226 """True if the changeset is either unstable, bumped or divergent"""
226 """True if the changeset is either unstable, bumped or divergent"""
227 return self.unstable() or self.bumped() or self.divergent()
227 return self.unstable() or self.bumped() or self.divergent()
228
228
229 def troubles(self):
229 def troubles(self):
230 """return the list of troubles affecting this changesets.
230 """return the list of troubles affecting this changesets.
231
231
232 Troubles are returned as strings. possible values are:
232 Troubles are returned as strings. possible values are:
233 - unstable,
233 - unstable,
234 - bumped,
234 - bumped,
235 - divergent.
235 - divergent.
236 """
236 """
237 troubles = []
237 troubles = []
238 if self.unstable():
238 if self.unstable():
239 troubles.append('unstable')
239 troubles.append('unstable')
240 if self.bumped():
240 if self.bumped():
241 troubles.append('bumped')
241 troubles.append('bumped')
242 if self.divergent():
242 if self.divergent():
243 troubles.append('divergent')
243 troubles.append('divergent')
244 return troubles
244 return troubles
245
245
246 def parents(self):
246 def parents(self):
247 """return contexts for each parent changeset"""
247 """return contexts for each parent changeset"""
248 return self._parents
248 return self._parents
249
249
250 def p1(self):
250 def p1(self):
251 return self._parents[0]
251 return self._parents[0]
252
252
253 def p2(self):
253 def p2(self):
254 if len(self._parents) == 2:
254 if len(self._parents) == 2:
255 return self._parents[1]
255 return self._parents[1]
256 return changectx(self._repo, -1)
256 return changectx(self._repo, -1)
257
257
258 def _fileinfo(self, path):
258 def _fileinfo(self, path):
259 if '_manifest' in self.__dict__:
259 if '_manifest' in self.__dict__:
260 try:
260 try:
261 return self._manifest[path], self._manifest.flags(path)
261 return self._manifest[path], self._manifest.flags(path)
262 except KeyError:
262 except KeyError:
263 raise error.ManifestLookupError(self._node, path,
263 raise error.ManifestLookupError(self._node, path,
264 _('not found in manifest'))
264 _('not found in manifest'))
265 if '_manifestdelta' in self.__dict__ or path in self.files():
265 if '_manifestdelta' in self.__dict__ or path in self.files():
266 if path in self._manifestdelta:
266 if path in self._manifestdelta:
267 return (self._manifestdelta[path],
267 return (self._manifestdelta[path],
268 self._manifestdelta.flags(path))
268 self._manifestdelta.flags(path))
269 node, flag = self._repo.manifest.find(self._changeset[0], path)
269 node, flag = self._repo.manifest.find(self._changeset[0], path)
270 if not node:
270 if not node:
271 raise error.ManifestLookupError(self._node, path,
271 raise error.ManifestLookupError(self._node, path,
272 _('not found in manifest'))
272 _('not found in manifest'))
273
273
274 return node, flag
274 return node, flag
275
275
276 def filenode(self, path):
276 def filenode(self, path):
277 return self._fileinfo(path)[0]
277 return self._fileinfo(path)[0]
278
278
279 def flags(self, path):
279 def flags(self, path):
280 try:
280 try:
281 return self._fileinfo(path)[1]
281 return self._fileinfo(path)[1]
282 except error.LookupError:
282 except error.LookupError:
283 return ''
283 return ''
284
284
285 def sub(self, path):
285 def sub(self, path):
286 return subrepo.subrepo(self, path)
286 return subrepo.subrepo(self, path)
287
287
288 def match(self, pats=[], include=None, exclude=None, default='glob'):
288 def match(self, pats=[], include=None, exclude=None, default='glob'):
289 r = self._repo
289 r = self._repo
290 return matchmod.match(r.root, r.getcwd(), pats,
290 return matchmod.match(r.root, r.getcwd(), pats,
291 include, exclude, default,
291 include, exclude, default,
292 auditor=r.auditor, ctx=self)
292 auditor=r.auditor, ctx=self)
293
293
294 def diff(self, ctx2=None, match=None, **opts):
294 def diff(self, ctx2=None, match=None, **opts):
295 """Returns a diff generator for the given contexts and matcher"""
295 """Returns a diff generator for the given contexts and matcher"""
296 if ctx2 is None:
296 if ctx2 is None:
297 ctx2 = self.p1()
297 ctx2 = self.p1()
298 if ctx2 is not None:
298 if ctx2 is not None:
299 ctx2 = self._repo[ctx2]
299 ctx2 = self._repo[ctx2]
300 diffopts = patch.diffopts(self._repo.ui, opts)
300 diffopts = patch.diffopts(self._repo.ui, opts)
301 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
301 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
302
302
303 @propertycache
303 @propertycache
304 def _dirs(self):
304 def _dirs(self):
305 return scmutil.dirs(self._manifest)
305 return scmutil.dirs(self._manifest)
306
306
307 def dirs(self):
307 def dirs(self):
308 return self._dirs
308 return self._dirs
309
309
310 def dirty(self, missing=False, merge=True, branch=True):
310 def dirty(self, missing=False, merge=True, branch=True):
311 return False
311 return False
312
312
313 def status(self, other=None, match=None, listignored=False,
313 def status(self, other=None, match=None, listignored=False,
314 listclean=False, listunknown=False, listsubrepos=False):
314 listclean=False, listunknown=False, listsubrepos=False):
315 """return status of files between two nodes or node and working
315 """return status of files between two nodes or node and working
316 directory.
316 directory.
317
317
318 If other is None, compare this node with working directory.
318 If other is None, compare this node with working directory.
319
319
320 returns (modified, added, removed, deleted, unknown, ignored, clean)
320 returns (modified, added, removed, deleted, unknown, ignored, clean)
321 """
321 """
322
322
323 ctx1 = self
323 ctx1 = self
324 ctx2 = self._repo[other]
324 ctx2 = self._repo[other]
325
325
326 # This next code block is, admittedly, fragile logic that tests for
326 # This next code block is, admittedly, fragile logic that tests for
327 # reversing the contexts and wouldn't need to exist if it weren't for
327 # reversing the contexts and wouldn't need to exist if it weren't for
328 # the fast (and common) code path of comparing the working directory
328 # the fast (and common) code path of comparing the working directory
329 # with its first parent.
329 # with its first parent.
330 #
330 #
331 # What we're aiming for here is the ability to call:
331 # What we're aiming for here is the ability to call:
332 #
332 #
333 # workingctx.status(parentctx)
333 # workingctx.status(parentctx)
334 #
334 #
335 # If we always built the manifest for each context and compared those,
335 # If we always built the manifest for each context and compared those,
336 # then we'd be done. But the special case of the above call means we
336 # then we'd be done. But the special case of the above call means we
337 # just copy the manifest of the parent.
337 # just copy the manifest of the parent.
338 reversed = False
338 reversed = False
339 if (not isinstance(ctx1, changectx)
339 if (not isinstance(ctx1, changectx)
340 and isinstance(ctx2, changectx)):
340 and isinstance(ctx2, changectx)):
341 reversed = True
341 reversed = True
342 ctx1, ctx2 = ctx2, ctx1
342 ctx1, ctx2 = ctx2, ctx1
343
343
344 match = ctx2._matchstatus(ctx1, match)
344 match = ctx2._matchstatus(ctx1, match)
345 r = scmutil.status([], [], [], [], [], [], [])
345 r = scmutil.status([], [], [], [], [], [], [])
346 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
346 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
347 listunknown)
347 listunknown)
348
348
349 if reversed:
349 if reversed:
350 # Reverse added and removed. Clear deleted, unknown and ignored as
350 # Reverse added and removed. Clear deleted, unknown and ignored as
351 # these make no sense to reverse.
351 # these make no sense to reverse.
352 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
352 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
353 r.clean)
353 r.clean)
354
354
355 if listsubrepos:
355 if listsubrepos:
356 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
356 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
357 rev2 = ctx2.subrev(subpath)
357 rev2 = ctx2.subrev(subpath)
358 try:
358 try:
359 submatch = matchmod.narrowmatcher(subpath, match)
359 submatch = matchmod.narrowmatcher(subpath, match)
360 s = sub.status(rev2, match=submatch, ignored=listignored,
360 s = sub.status(rev2, match=submatch, ignored=listignored,
361 clean=listclean, unknown=listunknown,
361 clean=listclean, unknown=listunknown,
362 listsubrepos=True)
362 listsubrepos=True)
363 for rfiles, sfiles in zip(r, s):
363 for rfiles, sfiles in zip(r, s):
364 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
364 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
365 except error.LookupError:
365 except error.LookupError:
366 self._repo.ui.status(_("skipping missing "
366 self._repo.ui.status(_("skipping missing "
367 "subrepository: %s\n") % subpath)
367 "subrepository: %s\n") % subpath)
368
368
369 for l in r:
369 for l in r:
370 l.sort()
370 l.sort()
371
371
372 return r
372 return r
373
373
374
374
375 def makememctx(repo, parents, text, user, date, branch, files, store,
375 def makememctx(repo, parents, text, user, date, branch, files, store,
376 editor=None):
376 editor=None):
377 def getfilectx(repo, memctx, path):
377 def getfilectx(repo, memctx, path):
378 data, mode, copied = store.getfile(path)
378 data, mode, copied = store.getfile(path)
379 if data is None:
379 if data is None:
380 return None
380 return None
381 islink, isexec = mode
381 islink, isexec = mode
382 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
382 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
383 copied=copied, memctx=memctx)
383 copied=copied, memctx=memctx)
384 extra = {}
384 extra = {}
385 if branch:
385 if branch:
386 extra['branch'] = encoding.fromlocal(branch)
386 extra['branch'] = encoding.fromlocal(branch)
387 ctx = memctx(repo, parents, text, files, getfilectx, user,
387 ctx = memctx(repo, parents, text, files, getfilectx, user,
388 date, extra, editor)
388 date, extra, editor)
389 return ctx
389 return ctx
390
390
391 class changectx(basectx):
391 class changectx(basectx):
392 """A changecontext object makes access to data related to a particular
392 """A changecontext object makes access to data related to a particular
393 changeset convenient. It represents a read-only context already present in
393 changeset convenient. It represents a read-only context already present in
394 the repo."""
394 the repo."""
395 def __init__(self, repo, changeid=''):
395 def __init__(self, repo, changeid=''):
396 """changeid is a revision number, node, or tag"""
396 """changeid is a revision number, node, or tag"""
397
397
398 # since basectx.__new__ already took care of copying the object, we
398 # since basectx.__new__ already took care of copying the object, we
399 # don't need to do anything in __init__, so we just exit here
399 # don't need to do anything in __init__, so we just exit here
400 if isinstance(changeid, basectx):
400 if isinstance(changeid, basectx):
401 return
401 return
402
402
403 if changeid == '':
403 if changeid == '':
404 changeid = '.'
404 changeid = '.'
405 self._repo = repo
405 self._repo = repo
406
406
407 try:
407 try:
408 if isinstance(changeid, int):
408 if isinstance(changeid, int):
409 self._node = repo.changelog.node(changeid)
409 self._node = repo.changelog.node(changeid)
410 self._rev = changeid
410 self._rev = changeid
411 return
411 return
412 if isinstance(changeid, long):
412 if isinstance(changeid, long):
413 changeid = str(changeid)
413 changeid = str(changeid)
414 if changeid == '.':
414 if changeid == '.':
415 self._node = repo.dirstate.p1()
415 self._node = repo.dirstate.p1()
416 self._rev = repo.changelog.rev(self._node)
416 self._rev = repo.changelog.rev(self._node)
417 return
417 return
418 if changeid == 'null':
418 if changeid == 'null':
419 self._node = nullid
419 self._node = nullid
420 self._rev = nullrev
420 self._rev = nullrev
421 return
421 return
422 if changeid == 'tip':
422 if changeid == 'tip':
423 self._node = repo.changelog.tip()
423 self._node = repo.changelog.tip()
424 self._rev = repo.changelog.rev(self._node)
424 self._rev = repo.changelog.rev(self._node)
425 return
425 return
426 if len(changeid) == 20:
426 if len(changeid) == 20:
427 try:
427 try:
428 self._node = changeid
428 self._node = changeid
429 self._rev = repo.changelog.rev(changeid)
429 self._rev = repo.changelog.rev(changeid)
430 return
430 return
431 except error.FilteredRepoLookupError:
431 except error.FilteredRepoLookupError:
432 raise
432 raise
433 except LookupError:
433 except LookupError:
434 pass
434 pass
435
435
436 try:
436 try:
437 r = int(changeid)
437 r = int(changeid)
438 if str(r) != changeid:
438 if str(r) != changeid:
439 raise ValueError
439 raise ValueError
440 l = len(repo.changelog)
440 l = len(repo.changelog)
441 if r < 0:
441 if r < 0:
442 r += l
442 r += l
443 if r < 0 or r >= l:
443 if r < 0 or r >= l:
444 raise ValueError
444 raise ValueError
445 self._rev = r
445 self._rev = r
446 self._node = repo.changelog.node(r)
446 self._node = repo.changelog.node(r)
447 return
447 return
448 except error.FilteredIndexError:
448 except error.FilteredIndexError:
449 raise
449 raise
450 except (ValueError, OverflowError, IndexError):
450 except (ValueError, OverflowError, IndexError):
451 pass
451 pass
452
452
453 if len(changeid) == 40:
453 if len(changeid) == 40:
454 try:
454 try:
455 self._node = bin(changeid)
455 self._node = bin(changeid)
456 self._rev = repo.changelog.rev(self._node)
456 self._rev = repo.changelog.rev(self._node)
457 return
457 return
458 except error.FilteredLookupError:
458 except error.FilteredLookupError:
459 raise
459 raise
460 except (TypeError, LookupError):
460 except (TypeError, LookupError):
461 pass
461 pass
462
462
463 # lookup bookmarks through the name interface
463 # lookup bookmarks through the name interface
464 try:
464 try:
465 self._node = repo.names.singlenode(repo, changeid)
465 self._node = repo.names.singlenode(repo, changeid)
466 self._rev = repo.changelog.rev(self._node)
466 self._rev = repo.changelog.rev(self._node)
467 return
467 return
468 except KeyError:
468 except KeyError:
469 pass
469 pass
470 except error.FilteredRepoLookupError:
470 except error.FilteredRepoLookupError:
471 raise
471 raise
472 except error.RepoLookupError:
472 except error.RepoLookupError:
473 pass
473 pass
474
474
475 self._node = repo.unfiltered().changelog._partialmatch(changeid)
475 self._node = repo.unfiltered().changelog._partialmatch(changeid)
476 if self._node is not None:
476 if self._node is not None:
477 self._rev = repo.changelog.rev(self._node)
477 self._rev = repo.changelog.rev(self._node)
478 return
478 return
479
479
480 # lookup failed
480 # lookup failed
481 # check if it might have come from damaged dirstate
481 # check if it might have come from damaged dirstate
482 #
482 #
483 # XXX we could avoid the unfiltered if we had a recognizable
483 # XXX we could avoid the unfiltered if we had a recognizable
484 # exception for filtered changeset access
484 # exception for filtered changeset access
485 if changeid in repo.unfiltered().dirstate.parents():
485 if changeid in repo.unfiltered().dirstate.parents():
486 msg = _("working directory has unknown parent '%s'!")
486 msg = _("working directory has unknown parent '%s'!")
487 raise error.Abort(msg % short(changeid))
487 raise error.Abort(msg % short(changeid))
488 try:
488 try:
489 if len(changeid) == 20:
489 if len(changeid) == 20:
490 changeid = hex(changeid)
490 changeid = hex(changeid)
491 except TypeError:
491 except TypeError:
492 pass
492 pass
493 except (error.FilteredIndexError, error.FilteredLookupError,
493 except (error.FilteredIndexError, error.FilteredLookupError,
494 error.FilteredRepoLookupError):
494 error.FilteredRepoLookupError):
495 if repo.filtername == 'visible':
495 if repo.filtername == 'visible':
496 msg = _("hidden revision '%s'") % changeid
496 msg = _("hidden revision '%s'") % changeid
497 hint = _('use --hidden to access hidden revisions')
497 hint = _('use --hidden to access hidden revisions')
498 raise error.FilteredRepoLookupError(msg, hint=hint)
498 raise error.FilteredRepoLookupError(msg, hint=hint)
499 msg = _("filtered revision '%s' (not in '%s' subset)")
499 msg = _("filtered revision '%s' (not in '%s' subset)")
500 msg %= (changeid, repo.filtername)
500 msg %= (changeid, repo.filtername)
501 raise error.FilteredRepoLookupError(msg)
501 raise error.FilteredRepoLookupError(msg)
502 except IndexError:
502 except IndexError:
503 pass
503 pass
504 raise error.RepoLookupError(
504 raise error.RepoLookupError(
505 _("unknown revision '%s'") % changeid)
505 _("unknown revision '%s'") % changeid)
506
506
507 def __hash__(self):
507 def __hash__(self):
508 try:
508 try:
509 return hash(self._rev)
509 return hash(self._rev)
510 except AttributeError:
510 except AttributeError:
511 return id(self)
511 return id(self)
512
512
513 def __nonzero__(self):
513 def __nonzero__(self):
514 return self._rev != nullrev
514 return self._rev != nullrev
515
515
516 @propertycache
516 @propertycache
517 def _changeset(self):
517 def _changeset(self):
518 return self._repo.changelog.read(self.rev())
518 return self._repo.changelog.read(self.rev())
519
519
520 @propertycache
520 @propertycache
521 def _manifest(self):
521 def _manifest(self):
522 return self._repo.manifest.read(self._changeset[0])
522 return self._repo.manifest.read(self._changeset[0])
523
523
524 @propertycache
524 @propertycache
525 def _manifestdelta(self):
525 def _manifestdelta(self):
526 return self._repo.manifest.readdelta(self._changeset[0])
526 return self._repo.manifest.readdelta(self._changeset[0])
527
527
528 @propertycache
528 @propertycache
529 def _parents(self):
529 def _parents(self):
530 p = self._repo.changelog.parentrevs(self._rev)
530 p = self._repo.changelog.parentrevs(self._rev)
531 if p[1] == nullrev:
531 if p[1] == nullrev:
532 p = p[:-1]
532 p = p[:-1]
533 return [changectx(self._repo, x) for x in p]
533 return [changectx(self._repo, x) for x in p]
534
534
535 def changeset(self):
535 def changeset(self):
536 return self._changeset
536 return self._changeset
537 def manifestnode(self):
537 def manifestnode(self):
538 return self._changeset[0]
538 return self._changeset[0]
539
539
540 def user(self):
540 def user(self):
541 return self._changeset[1]
541 return self._changeset[1]
542 def date(self):
542 def date(self):
543 return self._changeset[2]
543 return self._changeset[2]
544 def files(self):
544 def files(self):
545 return self._changeset[3]
545 return self._changeset[3]
546 def description(self):
546 def description(self):
547 return self._changeset[4]
547 return self._changeset[4]
548 def branch(self):
548 def branch(self):
549 return encoding.tolocal(self._changeset[5].get("branch"))
549 return encoding.tolocal(self._changeset[5].get("branch"))
550 def closesbranch(self):
550 def closesbranch(self):
551 return 'close' in self._changeset[5]
551 return 'close' in self._changeset[5]
552 def extra(self):
552 def extra(self):
553 return self._changeset[5]
553 return self._changeset[5]
554 def tags(self):
554 def tags(self):
555 return self._repo.nodetags(self._node)
555 return self._repo.nodetags(self._node)
556 def bookmarks(self):
556 def bookmarks(self):
557 return self._repo.nodebookmarks(self._node)
557 return self._repo.nodebookmarks(self._node)
558 def phase(self):
558 def phase(self):
559 return self._repo._phasecache.phase(self._repo, self._rev)
559 return self._repo._phasecache.phase(self._repo, self._rev)
560 def hidden(self):
560 def hidden(self):
561 return self._rev in repoview.filterrevs(self._repo, 'visible')
561 return self._rev in repoview.filterrevs(self._repo, 'visible')
562
562
563 def children(self):
563 def children(self):
564 """return contexts for each child changeset"""
564 """return contexts for each child changeset"""
565 c = self._repo.changelog.children(self._node)
565 c = self._repo.changelog.children(self._node)
566 return [changectx(self._repo, x) for x in c]
566 return [changectx(self._repo, x) for x in c]
567
567
568 def ancestors(self):
568 def ancestors(self):
569 for a in self._repo.changelog.ancestors([self._rev]):
569 for a in self._repo.changelog.ancestors([self._rev]):
570 yield changectx(self._repo, a)
570 yield changectx(self._repo, a)
571
571
572 def descendants(self):
572 def descendants(self):
573 for d in self._repo.changelog.descendants([self._rev]):
573 for d in self._repo.changelog.descendants([self._rev]):
574 yield changectx(self._repo, d)
574 yield changectx(self._repo, d)
575
575
576 def filectx(self, path, fileid=None, filelog=None):
576 def filectx(self, path, fileid=None, filelog=None):
577 """get a file context from this changeset"""
577 """get a file context from this changeset"""
578 if fileid is None:
578 if fileid is None:
579 fileid = self.filenode(path)
579 fileid = self.filenode(path)
580 return filectx(self._repo, path, fileid=fileid,
580 return filectx(self._repo, path, fileid=fileid,
581 changectx=self, filelog=filelog)
581 changectx=self, filelog=filelog)
582
582
583 def ancestor(self, c2, warn=False):
583 def ancestor(self, c2, warn=False):
584 """return the "best" ancestor context of self and c2
584 """return the "best" ancestor context of self and c2
585
585
586 If there are multiple candidates, it will show a message and check
586 If there are multiple candidates, it will show a message and check
587 merge.preferancestor configuration before falling back to the
587 merge.preferancestor configuration before falling back to the
588 revlog ancestor."""
588 revlog ancestor."""
589 # deal with workingctxs
589 # deal with workingctxs
590 n2 = c2._node
590 n2 = c2._node
591 if n2 is None:
591 if n2 is None:
592 n2 = c2._parents[0]._node
592 n2 = c2._parents[0]._node
593 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
593 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
594 if not cahs:
594 if not cahs:
595 anc = nullid
595 anc = nullid
596 elif len(cahs) == 1:
596 elif len(cahs) == 1:
597 anc = cahs[0]
597 anc = cahs[0]
598 else:
598 else:
599 for r in self._repo.ui.configlist('merge', 'preferancestor'):
599 for r in self._repo.ui.configlist('merge', 'preferancestor'):
600 try:
600 try:
601 ctx = changectx(self._repo, r)
601 ctx = changectx(self._repo, r)
602 except error.RepoLookupError:
602 except error.RepoLookupError:
603 continue
603 continue
604 anc = ctx.node()
604 anc = ctx.node()
605 if anc in cahs:
605 if anc in cahs:
606 break
606 break
607 else:
607 else:
608 anc = self._repo.changelog.ancestor(self._node, n2)
608 anc = self._repo.changelog.ancestor(self._node, n2)
609 if warn:
609 if warn:
610 self._repo.ui.status(
610 self._repo.ui.status(
611 (_("note: using %s as ancestor of %s and %s\n") %
611 (_("note: using %s as ancestor of %s and %s\n") %
612 (short(anc), short(self._node), short(n2))) +
612 (short(anc), short(self._node), short(n2))) +
613 ''.join(_(" alternatively, use --config "
613 ''.join(_(" alternatively, use --config "
614 "merge.preferancestor=%s\n") %
614 "merge.preferancestor=%s\n") %
615 short(n) for n in sorted(cahs) if n != anc))
615 short(n) for n in sorted(cahs) if n != anc))
616 return changectx(self._repo, anc)
616 return changectx(self._repo, anc)
617
617
618 def descendant(self, other):
618 def descendant(self, other):
619 """True if other is descendant of this changeset"""
619 """True if other is descendant of this changeset"""
620 return self._repo.changelog.descendant(self._rev, other._rev)
620 return self._repo.changelog.descendant(self._rev, other._rev)
621
621
622 def walk(self, match):
622 def walk(self, match):
623 fset = set(match.files())
623 fset = set(match.files())
624 # for dirstate.walk, files=['.'] means "walk the whole tree".
624 # for dirstate.walk, files=['.'] means "walk the whole tree".
625 # follow that here, too
625 # follow that here, too
626 fset.discard('.')
626 fset.discard('.')
627
627
628 # avoid the entire walk if we're only looking for specific files
628 # avoid the entire walk if we're only looking for specific files
629 if fset and not match.anypats():
629 if fset and not match.anypats():
630 if util.all([fn in self for fn in fset]):
630 if util.all([fn in self for fn in fset]):
631 for fn in sorted(fset):
631 for fn in sorted(fset):
632 if match(fn):
632 if match(fn):
633 yield fn
633 yield fn
634 raise StopIteration
634 raise StopIteration
635
635
636 for fn in self:
636 for fn in self:
637 if fn in fset:
637 if fn in fset:
638 # specified pattern is the exact name
638 # specified pattern is the exact name
639 fset.remove(fn)
639 fset.remove(fn)
640 if match(fn):
640 if match(fn):
641 yield fn
641 yield fn
642 for fn in sorted(fset):
642 for fn in sorted(fset):
643 if fn in self._dirs:
643 if fn in self._dirs:
644 # specified pattern is a directory
644 # specified pattern is a directory
645 continue
645 continue
646 match.bad(fn, _('no such file in rev %s') % self)
646 match.bad(fn, _('no such file in rev %s') % self)
647
647
648 def matches(self, match):
648 def matches(self, match):
649 return self.walk(match)
649 return self.walk(match)
650
650
651 class basefilectx(object):
651 class basefilectx(object):
652 """A filecontext object represents the common logic for its children:
652 """A filecontext object represents the common logic for its children:
653 filectx: read-only access to a filerevision that is already present
653 filectx: read-only access to a filerevision that is already present
654 in the repo,
654 in the repo,
655 workingfilectx: a filecontext that represents files from the working
655 workingfilectx: a filecontext that represents files from the working
656 directory,
656 directory,
657 memfilectx: a filecontext that represents files in-memory."""
657 memfilectx: a filecontext that represents files in-memory."""
658 def __new__(cls, repo, path, *args, **kwargs):
658 def __new__(cls, repo, path, *args, **kwargs):
659 return super(basefilectx, cls).__new__(cls)
659 return super(basefilectx, cls).__new__(cls)
660
660
661 @propertycache
661 @propertycache
662 def _filelog(self):
662 def _filelog(self):
663 return self._repo.file(self._path)
663 return self._repo.file(self._path)
664
664
665 @propertycache
665 @propertycache
666 def _changeid(self):
666 def _changeid(self):
667 if '_changeid' in self.__dict__:
667 if '_changeid' in self.__dict__:
668 return self._changeid
668 return self._changeid
669 elif '_changectx' in self.__dict__:
669 elif '_changectx' in self.__dict__:
670 return self._changectx.rev()
670 return self._changectx.rev()
671 else:
671 else:
672 return self._filelog.linkrev(self._filerev)
672 return self._filelog.linkrev(self._filerev)
673
673
674 @propertycache
674 @propertycache
675 def _filenode(self):
675 def _filenode(self):
676 if '_fileid' in self.__dict__:
676 if '_fileid' in self.__dict__:
677 return self._filelog.lookup(self._fileid)
677 return self._filelog.lookup(self._fileid)
678 else:
678 else:
679 return self._changectx.filenode(self._path)
679 return self._changectx.filenode(self._path)
680
680
681 @propertycache
681 @propertycache
682 def _filerev(self):
682 def _filerev(self):
683 return self._filelog.rev(self._filenode)
683 return self._filelog.rev(self._filenode)
684
684
685 @propertycache
685 @propertycache
686 def _repopath(self):
686 def _repopath(self):
687 return self._path
687 return self._path
688
688
689 def __nonzero__(self):
689 def __nonzero__(self):
690 try:
690 try:
691 self._filenode
691 self._filenode
692 return True
692 return True
693 except error.LookupError:
693 except error.LookupError:
694 # file is missing
694 # file is missing
695 return False
695 return False
696
696
697 def __str__(self):
697 def __str__(self):
698 return "%s@%s" % (self.path(), self._changectx)
698 return "%s@%s" % (self.path(), self._changectx)
699
699
700 def __repr__(self):
700 def __repr__(self):
701 return "<%s %s>" % (type(self).__name__, str(self))
701 return "<%s %s>" % (type(self).__name__, str(self))
702
702
703 def __hash__(self):
703 def __hash__(self):
704 try:
704 try:
705 return hash((self._path, self._filenode))
705 return hash((self._path, self._filenode))
706 except AttributeError:
706 except AttributeError:
707 return id(self)
707 return id(self)
708
708
709 def __eq__(self, other):
709 def __eq__(self, other):
710 try:
710 try:
711 return (type(self) == type(other) and self._path == other._path
711 return (type(self) == type(other) and self._path == other._path
712 and self._filenode == other._filenode)
712 and self._filenode == other._filenode)
713 except AttributeError:
713 except AttributeError:
714 return False
714 return False
715
715
716 def __ne__(self, other):
716 def __ne__(self, other):
717 return not (self == other)
717 return not (self == other)
718
718
719 def filerev(self):
719 def filerev(self):
720 return self._filerev
720 return self._filerev
721 def filenode(self):
721 def filenode(self):
722 return self._filenode
722 return self._filenode
723 def flags(self):
723 def flags(self):
724 return self._changectx.flags(self._path)
724 return self._changectx.flags(self._path)
725 def filelog(self):
725 def filelog(self):
726 return self._filelog
726 return self._filelog
727 def rev(self):
727 def rev(self):
728 return self._changeid
728 return self._changeid
729 def linkrev(self):
729 def linkrev(self):
730 return self._filelog.linkrev(self._filerev)
730 return self._filelog.linkrev(self._filerev)
731 def node(self):
731 def node(self):
732 return self._changectx.node()
732 return self._changectx.node()
733 def hex(self):
733 def hex(self):
734 return self._changectx.hex()
734 return self._changectx.hex()
735 def user(self):
735 def user(self):
736 return self._changectx.user()
736 return self._changectx.user()
737 def date(self):
737 def date(self):
738 return self._changectx.date()
738 return self._changectx.date()
739 def files(self):
739 def files(self):
740 return self._changectx.files()
740 return self._changectx.files()
741 def description(self):
741 def description(self):
742 return self._changectx.description()
742 return self._changectx.description()
743 def branch(self):
743 def branch(self):
744 return self._changectx.branch()
744 return self._changectx.branch()
745 def extra(self):
745 def extra(self):
746 return self._changectx.extra()
746 return self._changectx.extra()
747 def phase(self):
747 def phase(self):
748 return self._changectx.phase()
748 return self._changectx.phase()
749 def phasestr(self):
749 def phasestr(self):
750 return self._changectx.phasestr()
750 return self._changectx.phasestr()
751 def manifest(self):
751 def manifest(self):
752 return self._changectx.manifest()
752 return self._changectx.manifest()
753 def changectx(self):
753 def changectx(self):
754 return self._changectx
754 return self._changectx
755
755
756 def path(self):
756 def path(self):
757 return self._path
757 return self._path
758
758
759 def isbinary(self):
759 def isbinary(self):
760 try:
760 try:
761 return util.binary(self.data())
761 return util.binary(self.data())
762 except IOError:
762 except IOError:
763 return False
763 return False
764 def isexec(self):
764 def isexec(self):
765 return 'x' in self.flags()
765 return 'x' in self.flags()
766 def islink(self):
766 def islink(self):
767 return 'l' in self.flags()
767 return 'l' in self.flags()
768
768
769 def cmp(self, fctx):
769 def cmp(self, fctx):
770 """compare with other file context
770 """compare with other file context
771
771
772 returns True if different than fctx.
772 returns True if different than fctx.
773 """
773 """
774 if (fctx._filerev is None
774 if (fctx._filerev is None
775 and (self._repo._encodefilterpats
775 and (self._repo._encodefilterpats
776 # if file data starts with '\1\n', empty metadata block is
776 # if file data starts with '\1\n', empty metadata block is
777 # prepended, which adds 4 bytes to filelog.size().
777 # prepended, which adds 4 bytes to filelog.size().
778 or self.size() - 4 == fctx.size())
778 or self.size() - 4 == fctx.size())
779 or self.size() == fctx.size()):
779 or self.size() == fctx.size()):
780 return self._filelog.cmp(self._filenode, fctx.data())
780 return self._filelog.cmp(self._filenode, fctx.data())
781
781
782 return True
782 return True
783
783
784 def introrev(self):
784 def introrev(self):
785 """return the rev of the changeset which introduced this file revision
785 """return the rev of the changeset which introduced this file revision
786
786
787 This method is different from linkrev because it take into account the
787 This method is different from linkrev because it take into account the
788 changeset the filectx was created from. It ensures the returned
788 changeset the filectx was created from. It ensures the returned
789 revision is one of its ancestors. This prevents bugs from
789 revision is one of its ancestors. This prevents bugs from
790 'linkrev-shadowing' when a file revision is used by multiple
790 'linkrev-shadowing' when a file revision is used by multiple
791 changesets.
791 changesets.
792 """
792 """
793 lkr = self.linkrev()
793 lkr = self.linkrev()
794 attrs = vars(self)
794 attrs = vars(self)
795 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
795 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
796 if noctx or self.rev() == lkr:
796 if noctx or self.rev() == lkr:
797 return self.linkrev()
797 return self.linkrev()
798 return _adjustlinkrev(self._repo, self._path, self._filelog,
798 return _adjustlinkrev(self._repo, self._path, self._filelog,
799 self._filenode, self.rev(), inclusive=True)
799 self._filenode, self.rev(), inclusive=True)
800
800
801 def parents(self):
801 def parents(self):
802 _path = self._path
802 _path = self._path
803 fl = self._filelog
803 fl = self._filelog
804 parents = self._filelog.parents(self._filenode)
804 parents = self._filelog.parents(self._filenode)
805 pl = [(_path, node, fl) for node in parents if node != nullid]
805 pl = [(_path, node, fl) for node in parents if node != nullid]
806
806
807 r = fl.renamed(self._filenode)
807 r = fl.renamed(self._filenode)
808 if r:
808 if r:
809 # - In the simple rename case, both parent are nullid, pl is empty.
809 # - In the simple rename case, both parent are nullid, pl is empty.
810 # - In case of merge, only one of the parent is null id and should
810 # - In case of merge, only one of the parent is null id and should
811 # be replaced with the rename information. This parent is -always-
811 # be replaced with the rename information. This parent is -always-
812 # the first one.
812 # the first one.
813 #
813 #
814 # As null id have alway been filtered out in the previous list
814 # As null id have alway been filtered out in the previous list
815 # comprehension, inserting to 0 will always result in "replacing
815 # comprehension, inserting to 0 will always result in "replacing
816 # first nullid parent with rename information.
816 # first nullid parent with rename information.
817 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
817 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
818
818
819 ret = []
819 ret = []
820 for path, fnode, l in pl:
820 for path, fnode, l in pl:
821 if '_changeid' in vars(self) or '_changectx' in vars(self):
821 if '_changeid' in vars(self) or '_changectx' in vars(self):
822 # If self is associated with a changeset (probably explicitly
822 # If self is associated with a changeset (probably explicitly
823 # fed), ensure the created filectx is associated with a
823 # fed), ensure the created filectx is associated with a
824 # changeset that is an ancestor of self.changectx.
824 # changeset that is an ancestor of self.changectx.
825 rev = _adjustlinkrev(self._repo, path, l, fnode, self.rev())
825 rev = _adjustlinkrev(self._repo, path, l, fnode, self.rev())
826 fctx = filectx(self._repo, path, fileid=fnode, filelog=l,
826 fctx = filectx(self._repo, path, fileid=fnode, filelog=l,
827 changeid=rev)
827 changeid=rev)
828 else:
828 else:
829 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
829 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
830 ret.append(fctx)
830 ret.append(fctx)
831 return ret
831 return ret
832
832
833 def p1(self):
833 def p1(self):
834 return self.parents()[0]
834 return self.parents()[0]
835
835
836 def p2(self):
836 def p2(self):
837 p = self.parents()
837 p = self.parents()
838 if len(p) == 2:
838 if len(p) == 2:
839 return p[1]
839 return p[1]
840 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
840 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
841
841
842 def annotate(self, follow=False, linenumber=None, diffopts=None):
842 def annotate(self, follow=False, linenumber=None, diffopts=None):
843 '''returns a list of tuples of (ctx, line) for each line
843 '''returns a list of tuples of (ctx, line) for each line
844 in the file, where ctx is the filectx of the node where
844 in the file, where ctx is the filectx of the node where
845 that line was last changed.
845 that line was last changed.
846 This returns tuples of ((ctx, linenumber), line) for each line,
846 This returns tuples of ((ctx, linenumber), line) for each line,
847 if "linenumber" parameter is NOT "None".
847 if "linenumber" parameter is NOT "None".
848 In such tuples, linenumber means one at the first appearance
848 In such tuples, linenumber means one at the first appearance
849 in the managed file.
849 in the managed file.
850 To reduce annotation cost,
850 To reduce annotation cost,
851 this returns fixed value(False is used) as linenumber,
851 this returns fixed value(False is used) as linenumber,
852 if "linenumber" parameter is "False".'''
852 if "linenumber" parameter is "False".'''
853
853
854 if linenumber is None:
854 if linenumber is None:
855 def decorate(text, rev):
855 def decorate(text, rev):
856 return ([rev] * len(text.splitlines()), text)
856 return ([rev] * len(text.splitlines()), text)
857 elif linenumber:
857 elif linenumber:
858 def decorate(text, rev):
858 def decorate(text, rev):
859 size = len(text.splitlines())
859 size = len(text.splitlines())
860 return ([(rev, i) for i in xrange(1, size + 1)], text)
860 return ([(rev, i) for i in xrange(1, size + 1)], text)
861 else:
861 else:
862 def decorate(text, rev):
862 def decorate(text, rev):
863 return ([(rev, False)] * len(text.splitlines()), text)
863 return ([(rev, False)] * len(text.splitlines()), text)
864
864
865 def pair(parent, child):
865 def pair(parent, child):
866 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
866 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
867 refine=True)
867 refine=True)
868 for (a1, a2, b1, b2), t in blocks:
868 for (a1, a2, b1, b2), t in blocks:
869 # Changed blocks ('!') or blocks made only of blank lines ('~')
869 # Changed blocks ('!') or blocks made only of blank lines ('~')
870 # belong to the child.
870 # belong to the child.
871 if t == '=':
871 if t == '=':
872 child[0][b1:b2] = parent[0][a1:a2]
872 child[0][b1:b2] = parent[0][a1:a2]
873 return child
873 return child
874
874
875 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
875 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
876
876
877 def parents(f):
877 def parents(f):
878 pl = f.parents()
878 pl = f.parents()
879
879
880 # Don't return renamed parents if we aren't following.
880 # Don't return renamed parents if we aren't following.
881 if not follow:
881 if not follow:
882 pl = [p for p in pl if p.path() == f.path()]
882 pl = [p for p in pl if p.path() == f.path()]
883
883
884 # renamed filectx won't have a filelog yet, so set it
884 # renamed filectx won't have a filelog yet, so set it
885 # from the cache to save time
885 # from the cache to save time
886 for p in pl:
886 for p in pl:
887 if not '_filelog' in p.__dict__:
887 if not '_filelog' in p.__dict__:
888 p._filelog = getlog(p.path())
888 p._filelog = getlog(p.path())
889
889
890 return pl
890 return pl
891
891
892 # use linkrev to find the first changeset where self appeared
892 # use linkrev to find the first changeset where self appeared
893 base = self
893 base = self
894 introrev = self.introrev()
894 introrev = self.introrev()
895 if self.rev() != introrev:
895 if self.rev() != introrev:
896 base = filectx(self._repo, self._path, filelog=self.filelog(),
896 base = self.filectx(self.filenode(), changeid=introrev)
897 fileid=self.filenode(), changeid=introrev)
898
897
899 # This algorithm would prefer to be recursive, but Python is a
898 # This algorithm would prefer to be recursive, but Python is a
900 # bit recursion-hostile. Instead we do an iterative
899 # bit recursion-hostile. Instead we do an iterative
901 # depth-first search.
900 # depth-first search.
902
901
903 visit = [base]
902 visit = [base]
904 hist = {}
903 hist = {}
905 pcache = {}
904 pcache = {}
906 needed = {base: 1}
905 needed = {base: 1}
907 while visit:
906 while visit:
908 f = visit[-1]
907 f = visit[-1]
909 pcached = f in pcache
908 pcached = f in pcache
910 if not pcached:
909 if not pcached:
911 pcache[f] = parents(f)
910 pcache[f] = parents(f)
912
911
913 ready = True
912 ready = True
914 pl = pcache[f]
913 pl = pcache[f]
915 for p in pl:
914 for p in pl:
916 if p not in hist:
915 if p not in hist:
917 ready = False
916 ready = False
918 visit.append(p)
917 visit.append(p)
919 if not pcached:
918 if not pcached:
920 needed[p] = needed.get(p, 0) + 1
919 needed[p] = needed.get(p, 0) + 1
921 if ready:
920 if ready:
922 visit.pop()
921 visit.pop()
923 reusable = f in hist
922 reusable = f in hist
924 if reusable:
923 if reusable:
925 curr = hist[f]
924 curr = hist[f]
926 else:
925 else:
927 curr = decorate(f.data(), f)
926 curr = decorate(f.data(), f)
928 for p in pl:
927 for p in pl:
929 if not reusable:
928 if not reusable:
930 curr = pair(hist[p], curr)
929 curr = pair(hist[p], curr)
931 if needed[p] == 1:
930 if needed[p] == 1:
932 del hist[p]
931 del hist[p]
933 del needed[p]
932 del needed[p]
934 else:
933 else:
935 needed[p] -= 1
934 needed[p] -= 1
936
935
937 hist[f] = curr
936 hist[f] = curr
938 pcache[f] = []
937 pcache[f] = []
939
938
940 return zip(hist[base][0], hist[base][1].splitlines(True))
939 return zip(hist[base][0], hist[base][1].splitlines(True))
941
940
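The loop above replaces what would naturally be a recursive definition with an explicit stack, as the "recursion-hostile" comment notes, and uses a "needed" reference count so parent results can be dropped as soon as no remaining child depends on them. A generic sketch of the same pattern (names are hypothetical, not Mercurial API):

def walk(start, parents, compute):
    # post-order walk: a node is computed only after all of its parents
    visit, hist, pcache = [start], {}, {}
    needed = {start: 1}
    while visit:
        n = visit[-1]
        pcached = n in pcache
        if not pcached:
            pcache[n] = parents(n)
        ready, pl = True, pcache[n]
        for p in pl:
            if p not in hist:
                ready = False
                visit.append(p)
            if not pcached:
                needed[p] = needed.get(p, 0) + 1
        if ready:
            visit.pop()
            if n not in hist:
                hist[n] = compute(n, [hist[p] for p in pl])
                for p in pl:
                    # free parent results nobody else is waiting for
                    if needed[p] == 1:
                        del hist[p], needed[p]
                    else:
                        needed[p] -= 1
            pcache[n] = []            # parents are no longer needed
    return hist[start]

# toy DAG: 3 -> {1, 2}, 1 -> {0}, 2 -> {0}, 0 -> {}
dag = {3: [1, 2], 1: [0], 2: [0], 0: []}
print(walk(3, dag.get, lambda n, ps: n + sum(ps)))   # 3 + (1+0) + (2+0) = 6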
942 def ancestors(self, followfirst=False):
941 def ancestors(self, followfirst=False):
943 visit = {}
942 visit = {}
944 c = self
943 c = self
945 cut = followfirst and 1 or None
944 cut = followfirst and 1 or None
946 while True:
945 while True:
947 for parent in c.parents()[:cut]:
946 for parent in c.parents()[:cut]:
948 visit[(parent.rev(), parent.node())] = parent
947 visit[(parent.rev(), parent.node())] = parent
949 if not visit:
948 if not visit:
950 break
949 break
951 c = visit.pop(max(visit))
950 c = visit.pop(max(visit))
952 yield c
951 yield c
953
952
954 class filectx(basefilectx):
953 class filectx(basefilectx):
955 """A filecontext object makes access to data related to a particular
954 """A filecontext object makes access to data related to a particular
956 filerevision convenient."""
955 filerevision convenient."""
957 def __init__(self, repo, path, changeid=None, fileid=None,
956 def __init__(self, repo, path, changeid=None, fileid=None,
958 filelog=None, changectx=None):
957 filelog=None, changectx=None):
959 """changeid can be a changeset revision, node, or tag.
958 """changeid can be a changeset revision, node, or tag.
960 fileid can be a file revision or node."""
959 fileid can be a file revision or node."""
961 self._repo = repo
960 self._repo = repo
962 self._path = path
961 self._path = path
963
962
964 assert (changeid is not None
963 assert (changeid is not None
965 or fileid is not None
964 or fileid is not None
966 or changectx is not None), \
965 or changectx is not None), \
967 ("bad args: changeid=%r, fileid=%r, changectx=%r"
966 ("bad args: changeid=%r, fileid=%r, changectx=%r"
968 % (changeid, fileid, changectx))
967 % (changeid, fileid, changectx))
969
968
970 if filelog is not None:
969 if filelog is not None:
971 self._filelog = filelog
970 self._filelog = filelog
972
971
973 if changeid is not None:
972 if changeid is not None:
974 self._changeid = changeid
973 self._changeid = changeid
975 if changectx is not None:
974 if changectx is not None:
976 self._changectx = changectx
975 self._changectx = changectx
977 if fileid is not None:
976 if fileid is not None:
978 self._fileid = fileid
977 self._fileid = fileid
979
978
980 @propertycache
979 @propertycache
981 def _changectx(self):
980 def _changectx(self):
982 try:
981 try:
983 return changectx(self._repo, self._changeid)
982 return changectx(self._repo, self._changeid)
984 except error.FilteredRepoLookupError:
983 except error.FilteredRepoLookupError:
985 # Linkrev may point to any revision in the repository. When the
984 # Linkrev may point to any revision in the repository. When the
986 # repository is filtered this may lead to `filectx` trying to build a
985 # repository is filtered this may lead to `filectx` trying to build a
987 # `changectx` for a filtered revision. In such a case we fall back to
986 # `changectx` for a filtered revision. In such a case we fall back to
988 # creating a `changectx` on the unfiltered version of the repository.
987 # creating a `changectx` on the unfiltered version of the repository.
989 # This fallback should not be an issue because `changectx` from
988 # This fallback should not be an issue because `changectx` from
990 # `filectx` are not used in complex operations that care about
989 # `filectx` are not used in complex operations that care about
991 # filtering.
990 # filtering.
992 #
991 #
993 # This fallback is a cheap and dirty fix that prevents several
992 # This fallback is a cheap and dirty fix that prevents several
994 # crashes. It does not ensure the behavior is correct. However, the
993 # crashes. It does not ensure the behavior is correct. However, the
995 # behavior was not correct before filtering either, and "incorrect
994 # behavior was not correct before filtering either, and "incorrect
996 # behavior" is seen as better than "crash".
995 # behavior" is seen as better than "crash".
997 #
996 #
998 # Linkrevs have several serious troubles with filtering that are
997 # Linkrevs have several serious troubles with filtering that are
999 # complicated to solve. Proper handling of the issue here should be
998 # complicated to solve. Proper handling of the issue here should be
1000 # considered when solving the linkrev issues is on the table.
999 # considered when solving the linkrev issues is on the table.
1001 return changectx(self._repo.unfiltered(), self._changeid)
1000 return changectx(self._repo.unfiltered(), self._changeid)
1002
1001
1003 def filectx(self, fileid):
1002 def filectx(self, fileid, changeid=None):
1004 '''opens an arbitrary revision of the file without
1003 '''opens an arbitrary revision of the file without
1005 opening a new filelog'''
1004 opening a new filelog'''
1006 return filectx(self._repo, self._path, fileid=fileid,
1005 return filectx(self._repo, self._path, fileid=fileid,
1007 filelog=self._filelog)
1006 filelog=self._filelog, changeid=changeid)
1008
1007
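The hunk above adds a changeid parameter to filectx.filectx(), and the annotate code earlier in this file now obtains its base via self.filectx(self.filenode(), changeid=introrev) instead of constructing filectx directly. One plausible benefit (an assumption, not stated in the code itself) is that a subclass overriding filectx() can supply its own context type. A self-contained sketch of that dispatch, with made-up class names:

class plainfilectxdemo(object):
    def __init__(self, fileid, changeid=None):
        self.fileid, self.changeid = fileid, changeid

    def filectx(self, fileid, changeid=None):
        # mirrors filectx.filectx() above: open another revision of the
        # "same" file, staying within the caller's own class
        return self.__class__(fileid, changeid=changeid)

class fancyfilectxdemo(plainfilectxdemo):
    def filectx(self, fileid, changeid=None):
        # subclass hook: code written as "base = self.filectx(...)" lands
        # here instead of hard-coding the base class
        return fancyfilectxdemo(fileid, changeid=changeid)

base = fancyfilectxdemo('f1').filectx('f0', changeid=7)
print('%s %s' % (type(base).__name__, base.changeid))   # fancyfilectxdemo 7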
1009 def data(self):
1008 def data(self):
1010 try:
1009 try:
1011 return self._filelog.read(self._filenode)
1010 return self._filelog.read(self._filenode)
1012 except error.CensoredNodeError:
1011 except error.CensoredNodeError:
1013 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1012 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1014 return ""
1013 return ""
1015 raise util.Abort(_("censored node: %s") % short(self._filenode),
1014 raise util.Abort(_("censored node: %s") % short(self._filenode),
1016 hint=_("set censor.policy to ignore errors"))
1015 hint=_("set censor.policy to ignore errors"))
1017
1016
1018 def size(self):
1017 def size(self):
1019 return self._filelog.size(self._filerev)
1018 return self._filelog.size(self._filerev)
1020
1019
1021 def renamed(self):
1020 def renamed(self):
1022 """check if file was actually renamed in this changeset revision
1021 """check if file was actually renamed in this changeset revision
1023
1022
1024 If a rename is logged in the file revision, we report the copy only
1023 If a rename is logged in the file revision, we report the copy only
1025 if the file revision's linkrev points back to the changeset in question
1024 if the file revision's linkrev points back to the changeset in question
1026 or both changeset parents contain different file revisions.
1025 or both changeset parents contain different file revisions.
1027 """
1026 """
1028
1027
1029 renamed = self._filelog.renamed(self._filenode)
1028 renamed = self._filelog.renamed(self._filenode)
1030 if not renamed:
1029 if not renamed:
1031 return renamed
1030 return renamed
1032
1031
1033 if self.rev() == self.linkrev():
1032 if self.rev() == self.linkrev():
1034 return renamed
1033 return renamed
1035
1034
1036 name = self.path()
1035 name = self.path()
1037 fnode = self._filenode
1036 fnode = self._filenode
1038 for p in self._changectx.parents():
1037 for p in self._changectx.parents():
1039 try:
1038 try:
1040 if fnode == p.filenode(name):
1039 if fnode == p.filenode(name):
1041 return None
1040 return None
1042 except error.LookupError:
1041 except error.LookupError:
1043 pass
1042 pass
1044 return renamed
1043 return renamed
1045
1044
1046 def children(self):
1045 def children(self):
1047 # hard for renames
1046 # hard for renames
1048 c = self._filelog.children(self._filenode)
1047 c = self._filelog.children(self._filenode)
1049 return [filectx(self._repo, self._path, fileid=x,
1048 return [filectx(self._repo, self._path, fileid=x,
1050 filelog=self._filelog) for x in c]
1049 filelog=self._filelog) for x in c]
1051
1050
1052 class committablectx(basectx):
1051 class committablectx(basectx):
1053 """A committablectx object provides common functionality for a context that
1052 """A committablectx object provides common functionality for a context that
1054 wants the ability to commit, e.g. workingctx or memctx."""
1053 wants the ability to commit, e.g. workingctx or memctx."""
1055 def __init__(self, repo, text="", user=None, date=None, extra=None,
1054 def __init__(self, repo, text="", user=None, date=None, extra=None,
1056 changes=None):
1055 changes=None):
1057 self._repo = repo
1056 self._repo = repo
1058 self._rev = None
1057 self._rev = None
1059 self._node = None
1058 self._node = None
1060 self._text = text
1059 self._text = text
1061 if date:
1060 if date:
1062 self._date = util.parsedate(date)
1061 self._date = util.parsedate(date)
1063 if user:
1062 if user:
1064 self._user = user
1063 self._user = user
1065 if changes:
1064 if changes:
1066 self._status = changes
1065 self._status = changes
1067
1066
1068 self._extra = {}
1067 self._extra = {}
1069 if extra:
1068 if extra:
1070 self._extra = extra.copy()
1069 self._extra = extra.copy()
1071 if 'branch' not in self._extra:
1070 if 'branch' not in self._extra:
1072 try:
1071 try:
1073 branch = encoding.fromlocal(self._repo.dirstate.branch())
1072 branch = encoding.fromlocal(self._repo.dirstate.branch())
1074 except UnicodeDecodeError:
1073 except UnicodeDecodeError:
1075 raise util.Abort(_('branch name not in UTF-8!'))
1074 raise util.Abort(_('branch name not in UTF-8!'))
1076 self._extra['branch'] = branch
1075 self._extra['branch'] = branch
1077 if self._extra['branch'] == '':
1076 if self._extra['branch'] == '':
1078 self._extra['branch'] = 'default'
1077 self._extra['branch'] = 'default'
1079
1078
1080 def __str__(self):
1079 def __str__(self):
1081 return str(self._parents[0]) + "+"
1080 return str(self._parents[0]) + "+"
1082
1081
1083 def __nonzero__(self):
1082 def __nonzero__(self):
1084 return True
1083 return True
1085
1084
1086 def _buildflagfunc(self):
1085 def _buildflagfunc(self):
1087 # Create a fallback function for getting file flags when the
1086 # Create a fallback function for getting file flags when the
1088 # filesystem doesn't support them
1087 # filesystem doesn't support them
1089
1088
1090 copiesget = self._repo.dirstate.copies().get
1089 copiesget = self._repo.dirstate.copies().get
1091
1090
1092 if len(self._parents) < 2:
1091 if len(self._parents) < 2:
1093 # when we have one parent, it's easy: copy from parent
1092 # when we have one parent, it's easy: copy from parent
1094 man = self._parents[0].manifest()
1093 man = self._parents[0].manifest()
1095 def func(f):
1094 def func(f):
1096 f = copiesget(f, f)
1095 f = copiesget(f, f)
1097 return man.flags(f)
1096 return man.flags(f)
1098 else:
1097 else:
1099 # merges are tricky: we try to reconstruct the unstored
1098 # merges are tricky: we try to reconstruct the unstored
1100 # result from the merge (issue1802)
1099 # result from the merge (issue1802)
1101 p1, p2 = self._parents
1100 p1, p2 = self._parents
1102 pa = p1.ancestor(p2)
1101 pa = p1.ancestor(p2)
1103 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1102 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1104
1103
1105 def func(f):
1104 def func(f):
1106 f = copiesget(f, f) # may be wrong for merges with copies
1105 f = copiesget(f, f) # may be wrong for merges with copies
1107 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1106 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1108 if fl1 == fl2:
1107 if fl1 == fl2:
1109 return fl1
1108 return fl1
1110 if fl1 == fla:
1109 if fl1 == fla:
1111 return fl2
1110 return fl2
1112 if fl2 == fla:
1111 if fl2 == fla:
1113 return fl1
1112 return fl1
1114 return '' # punt for conflicts
1113 return '' # punt for conflicts
1115
1114
1116 return func
1115 return func
1117
1116
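The three-way comparison in func() above resolves merged file flags: if both parents agree the flag is kept, otherwise the side that diverged from the ancestor wins, and a genuine conflict is punted to ''. The same rule in isolation, a small sketch with made-up flag values:

def mergeflags(fl1, fl2, fla):
    # fl1/fl2: flags in the two parents, fla: flag in their ancestor
    if fl1 == fl2:
        return fl1            # parents agree
    if fl1 == fla:
        return fl2            # only p2 changed the flag
    if fl2 == fla:
        return fl1            # only p1 changed the flag
    return ''                 # both changed it differently: punt

print(mergeflags('x', '', ''))    # 'x'  (p1 added the exec bit)
print(mergeflags('l', 'x', ''))   # ''   (conflict)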
1118 @propertycache
1117 @propertycache
1119 def _flagfunc(self):
1118 def _flagfunc(self):
1120 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1119 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1121
1120
1122 @propertycache
1121 @propertycache
1123 def _manifest(self):
1122 def _manifest(self):
1124 """generate a manifest corresponding to the values in self._status
1123 """generate a manifest corresponding to the values in self._status
1125
1124
1126 This reuses the file nodeid from the parent, but appends an extra letter
1125 This reuses the file nodeid from the parent, but appends an extra letter
1127 when modified. Modified files get an extra 'm' while added files get
1126 when modified. Modified files get an extra 'm' while added files get
1128 an extra 'a'. This is used by the manifest merge to see that files
1127 an extra 'a'. This is used by the manifest merge to see that files
1129 are different and by update logic to avoid deleting newly added files.
1128 are different and by update logic to avoid deleting newly added files.
1130 """
1129 """
1131
1130
1132 man1 = self._parents[0].manifest()
1131 man1 = self._parents[0].manifest()
1133 man = man1.copy()
1132 man = man1.copy()
1134 if len(self._parents) > 1:
1133 if len(self._parents) > 1:
1135 man2 = self.p2().manifest()
1134 man2 = self.p2().manifest()
1136 def getman(f):
1135 def getman(f):
1137 if f in man1:
1136 if f in man1:
1138 return man1
1137 return man1
1139 return man2
1138 return man2
1140 else:
1139 else:
1141 getman = lambda f: man1
1140 getman = lambda f: man1
1142
1141
1143 copied = self._repo.dirstate.copies()
1142 copied = self._repo.dirstate.copies()
1144 ff = self._flagfunc
1143 ff = self._flagfunc
1145 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1144 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1146 for f in l:
1145 for f in l:
1147 orig = copied.get(f, f)
1146 orig = copied.get(f, f)
1148 man[f] = getman(orig).get(orig, nullid) + i
1147 man[f] = getman(orig).get(orig, nullid) + i
1149 try:
1148 try:
1150 man.setflag(f, ff(f))
1149 man.setflag(f, ff(f))
1151 except OSError:
1150 except OSError:
1152 pass
1151 pass
1153
1152
1154 for f in self._status.deleted + self._status.removed:
1153 for f in self._status.deleted + self._status.removed:
1155 if f in man:
1154 if f in man:
1156 del man[f]
1155 del man[f]
1157
1156
1158 return man
1157 return man
1159
1158
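As the docstring above describes, the working-copy manifest marks changed files by appending a status letter to the parent's nodeid. A tiny illustration with fabricated values (nullid stands in for the 20 zero bytes used for added files):

nullid = '\0' * 20                     # as in mercurial.node
parent_nodeid = '\x12' * 20            # pretend nodeid of the file in p1

working_manifest = {
    'modified.txt': parent_nodeid + 'm',   # modified: parent nodeid + 'm'
    'added.txt': nullid + 'a',             # added: no parent revision + 'a'
}
# the extra 21st byte is what lets manifest merging and the update logic
# tell these entries apart from any real 20-byte nodeid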
1160 @propertycache
1159 @propertycache
1161 def _status(self):
1160 def _status(self):
1162 return self._repo.status()
1161 return self._repo.status()
1163
1162
1164 @propertycache
1163 @propertycache
1165 def _user(self):
1164 def _user(self):
1166 return self._repo.ui.username()
1165 return self._repo.ui.username()
1167
1166
1168 @propertycache
1167 @propertycache
1169 def _date(self):
1168 def _date(self):
1170 return util.makedate()
1169 return util.makedate()
1171
1170
1172 def subrev(self, subpath):
1171 def subrev(self, subpath):
1173 return None
1172 return None
1174
1173
1175 def user(self):
1174 def user(self):
1176 return self._user or self._repo.ui.username()
1175 return self._user or self._repo.ui.username()
1177 def date(self):
1176 def date(self):
1178 return self._date
1177 return self._date
1179 def description(self):
1178 def description(self):
1180 return self._text
1179 return self._text
1181 def files(self):
1180 def files(self):
1182 return sorted(self._status.modified + self._status.added +
1181 return sorted(self._status.modified + self._status.added +
1183 self._status.removed)
1182 self._status.removed)
1184
1183
1185 def modified(self):
1184 def modified(self):
1186 return self._status.modified
1185 return self._status.modified
1187 def added(self):
1186 def added(self):
1188 return self._status.added
1187 return self._status.added
1189 def removed(self):
1188 def removed(self):
1190 return self._status.removed
1189 return self._status.removed
1191 def deleted(self):
1190 def deleted(self):
1192 return self._status.deleted
1191 return self._status.deleted
1193 def branch(self):
1192 def branch(self):
1194 return encoding.tolocal(self._extra['branch'])
1193 return encoding.tolocal(self._extra['branch'])
1195 def closesbranch(self):
1194 def closesbranch(self):
1196 return 'close' in self._extra
1195 return 'close' in self._extra
1197 def extra(self):
1196 def extra(self):
1198 return self._extra
1197 return self._extra
1199
1198
1200 def tags(self):
1199 def tags(self):
1201 t = []
1200 t = []
1202 for p in self.parents():
1201 for p in self.parents():
1203 t.extend(p.tags())
1202 t.extend(p.tags())
1204 return t
1203 return t
1205
1204
1206 def bookmarks(self):
1205 def bookmarks(self):
1207 b = []
1206 b = []
1208 for p in self.parents():
1207 for p in self.parents():
1209 b.extend(p.bookmarks())
1208 b.extend(p.bookmarks())
1210 return b
1209 return b
1211
1210
1212 def phase(self):
1211 def phase(self):
1213 phase = phases.draft # default phase to draft
1212 phase = phases.draft # default phase to draft
1214 for p in self.parents():
1213 for p in self.parents():
1215 phase = max(phase, p.phase())
1214 phase = max(phase, p.phase())
1216 return phase
1215 return phase
1217
1216
1218 def hidden(self):
1217 def hidden(self):
1219 return False
1218 return False
1220
1219
1221 def children(self):
1220 def children(self):
1222 return []
1221 return []
1223
1222
1224 def flags(self, path):
1223 def flags(self, path):
1225 if '_manifest' in self.__dict__:
1224 if '_manifest' in self.__dict__:
1226 try:
1225 try:
1227 return self._manifest.flags(path)
1226 return self._manifest.flags(path)
1228 except KeyError:
1227 except KeyError:
1229 return ''
1228 return ''
1230
1229
1231 try:
1230 try:
1232 return self._flagfunc(path)
1231 return self._flagfunc(path)
1233 except OSError:
1232 except OSError:
1234 return ''
1233 return ''
1235
1234
1236 def ancestor(self, c2):
1235 def ancestor(self, c2):
1237 """return the "best" ancestor context of self and c2"""
1236 """return the "best" ancestor context of self and c2"""
1238 return self._parents[0].ancestor(c2) # punt on two parents for now
1237 return self._parents[0].ancestor(c2) # punt on two parents for now
1239
1238
1240 def walk(self, match):
1239 def walk(self, match):
1241 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1240 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1242 True, False))
1241 True, False))
1243
1242
1244 def matches(self, match):
1243 def matches(self, match):
1245 return sorted(self._repo.dirstate.matches(match))
1244 return sorted(self._repo.dirstate.matches(match))
1246
1245
1247 def ancestors(self):
1246 def ancestors(self):
1248 for p in self._parents:
1247 for p in self._parents:
1249 yield p
1248 yield p
1250 for a in self._repo.changelog.ancestors(
1249 for a in self._repo.changelog.ancestors(
1251 [p.rev() for p in self._parents]):
1250 [p.rev() for p in self._parents]):
1252 yield changectx(self._repo, a)
1251 yield changectx(self._repo, a)
1253
1252
1254 def markcommitted(self, node):
1253 def markcommitted(self, node):
1255 """Perform post-commit cleanup necessary after committing this ctx
1254 """Perform post-commit cleanup necessary after committing this ctx
1256
1255
1257 Specifically, this updates the backing stores this working context
1256 Specifically, this updates the backing stores this working context
1258 wraps to reflect the fact that the changes reflected by this
1257 wraps to reflect the fact that the changes reflected by this
1259 workingctx have been committed. For example, it marks
1258 workingctx have been committed. For example, it marks
1260 modified and added files as normal in the dirstate.
1259 modified and added files as normal in the dirstate.
1261
1260
1262 """
1261 """
1263
1262
1264 self._repo.dirstate.beginparentchange()
1263 self._repo.dirstate.beginparentchange()
1265 for f in self.modified() + self.added():
1264 for f in self.modified() + self.added():
1266 self._repo.dirstate.normal(f)
1265 self._repo.dirstate.normal(f)
1267 for f in self.removed():
1266 for f in self.removed():
1268 self._repo.dirstate.drop(f)
1267 self._repo.dirstate.drop(f)
1269 self._repo.dirstate.setparents(node)
1268 self._repo.dirstate.setparents(node)
1270 self._repo.dirstate.endparentchange()
1269 self._repo.dirstate.endparentchange()
1271
1270
1272 def dirs(self):
1271 def dirs(self):
1273 return self._repo.dirstate.dirs()
1272 return self._repo.dirstate.dirs()
1274
1273
1275 class workingctx(committablectx):
1274 class workingctx(committablectx):
1276 """A workingctx object makes access to data related to
1275 """A workingctx object makes access to data related to
1277 the current working directory convenient.
1276 the current working directory convenient.
1278 date - any valid date string or (unixtime, offset), or None.
1277 date - any valid date string or (unixtime, offset), or None.
1279 user - username string, or None.
1278 user - username string, or None.
1280 extra - a dictionary of extra values, or None.
1279 extra - a dictionary of extra values, or None.
1281 changes - a list of file lists as returned by localrepo.status()
1280 changes - a list of file lists as returned by localrepo.status()
1282 or None to use the repository status.
1281 or None to use the repository status.
1283 """
1282 """
1284 def __init__(self, repo, text="", user=None, date=None, extra=None,
1283 def __init__(self, repo, text="", user=None, date=None, extra=None,
1285 changes=None):
1284 changes=None):
1286 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1285 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1287
1286
1288 def __iter__(self):
1287 def __iter__(self):
1289 d = self._repo.dirstate
1288 d = self._repo.dirstate
1290 for f in d:
1289 for f in d:
1291 if d[f] != 'r':
1290 if d[f] != 'r':
1292 yield f
1291 yield f
1293
1292
1294 def __contains__(self, key):
1293 def __contains__(self, key):
1295 return self._repo.dirstate[key] not in "?r"
1294 return self._repo.dirstate[key] not in "?r"
1296
1295
1297 @propertycache
1296 @propertycache
1298 def _parents(self):
1297 def _parents(self):
1299 p = self._repo.dirstate.parents()
1298 p = self._repo.dirstate.parents()
1300 if p[1] == nullid:
1299 if p[1] == nullid:
1301 p = p[:-1]
1300 p = p[:-1]
1302 return [changectx(self._repo, x) for x in p]
1301 return [changectx(self._repo, x) for x in p]
1303
1302
1304 def filectx(self, path, filelog=None):
1303 def filectx(self, path, filelog=None):
1305 """get a file context from the working directory"""
1304 """get a file context from the working directory"""
1306 return workingfilectx(self._repo, path, workingctx=self,
1305 return workingfilectx(self._repo, path, workingctx=self,
1307 filelog=filelog)
1306 filelog=filelog)
1308
1307
1309 def dirty(self, missing=False, merge=True, branch=True):
1308 def dirty(self, missing=False, merge=True, branch=True):
1310 "check whether a working directory is modified"
1309 "check whether a working directory is modified"
1311 # check subrepos first
1310 # check subrepos first
1312 for s in sorted(self.substate):
1311 for s in sorted(self.substate):
1313 if self.sub(s).dirty():
1312 if self.sub(s).dirty():
1314 return True
1313 return True
1315 # check current working dir
1314 # check current working dir
1316 return ((merge and self.p2()) or
1315 return ((merge and self.p2()) or
1317 (branch and self.branch() != self.p1().branch()) or
1316 (branch and self.branch() != self.p1().branch()) or
1318 self.modified() or self.added() or self.removed() or
1317 self.modified() or self.added() or self.removed() or
1319 (missing and self.deleted()))
1318 (missing and self.deleted()))
1320
1319
1321 def add(self, list, prefix=""):
1320 def add(self, list, prefix=""):
1322 join = lambda f: os.path.join(prefix, f)
1321 join = lambda f: os.path.join(prefix, f)
1323 wlock = self._repo.wlock()
1322 wlock = self._repo.wlock()
1324 ui, ds = self._repo.ui, self._repo.dirstate
1323 ui, ds = self._repo.ui, self._repo.dirstate
1325 try:
1324 try:
1326 rejected = []
1325 rejected = []
1327 lstat = self._repo.wvfs.lstat
1326 lstat = self._repo.wvfs.lstat
1328 for f in list:
1327 for f in list:
1329 scmutil.checkportable(ui, join(f))
1328 scmutil.checkportable(ui, join(f))
1330 try:
1329 try:
1331 st = lstat(f)
1330 st = lstat(f)
1332 except OSError:
1331 except OSError:
1333 ui.warn(_("%s does not exist!\n") % join(f))
1332 ui.warn(_("%s does not exist!\n") % join(f))
1334 rejected.append(f)
1333 rejected.append(f)
1335 continue
1334 continue
1336 if st.st_size > 10000000:
1335 if st.st_size > 10000000:
1337 ui.warn(_("%s: up to %d MB of RAM may be required "
1336 ui.warn(_("%s: up to %d MB of RAM may be required "
1338 "to manage this file\n"
1337 "to manage this file\n"
1339 "(use 'hg revert %s' to cancel the "
1338 "(use 'hg revert %s' to cancel the "
1340 "pending addition)\n")
1339 "pending addition)\n")
1341 % (f, 3 * st.st_size // 1000000, join(f)))
1340 % (f, 3 * st.st_size // 1000000, join(f)))
1342 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1341 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1343 ui.warn(_("%s not added: only files and symlinks "
1342 ui.warn(_("%s not added: only files and symlinks "
1344 "supported currently\n") % join(f))
1343 "supported currently\n") % join(f))
1345 rejected.append(f)
1344 rejected.append(f)
1346 elif ds[f] in 'amn':
1345 elif ds[f] in 'amn':
1347 ui.warn(_("%s already tracked!\n") % join(f))
1346 ui.warn(_("%s already tracked!\n") % join(f))
1348 elif ds[f] == 'r':
1347 elif ds[f] == 'r':
1349 ds.normallookup(f)
1348 ds.normallookup(f)
1350 else:
1349 else:
1351 ds.add(f)
1350 ds.add(f)
1352 return rejected
1351 return rejected
1353 finally:
1352 finally:
1354 wlock.release()
1353 wlock.release()
1355
1354
1356 def forget(self, files, prefix=""):
1355 def forget(self, files, prefix=""):
1357 join = lambda f: os.path.join(prefix, f)
1356 join = lambda f: os.path.join(prefix, f)
1358 wlock = self._repo.wlock()
1357 wlock = self._repo.wlock()
1359 try:
1358 try:
1360 rejected = []
1359 rejected = []
1361 for f in files:
1360 for f in files:
1362 if f not in self._repo.dirstate:
1361 if f not in self._repo.dirstate:
1363 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1362 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1364 rejected.append(f)
1363 rejected.append(f)
1365 elif self._repo.dirstate[f] != 'a':
1364 elif self._repo.dirstate[f] != 'a':
1366 self._repo.dirstate.remove(f)
1365 self._repo.dirstate.remove(f)
1367 else:
1366 else:
1368 self._repo.dirstate.drop(f)
1367 self._repo.dirstate.drop(f)
1369 return rejected
1368 return rejected
1370 finally:
1369 finally:
1371 wlock.release()
1370 wlock.release()
1372
1371
1373 def undelete(self, list):
1372 def undelete(self, list):
1374 pctxs = self.parents()
1373 pctxs = self.parents()
1375 wlock = self._repo.wlock()
1374 wlock = self._repo.wlock()
1376 try:
1375 try:
1377 for f in list:
1376 for f in list:
1378 if self._repo.dirstate[f] != 'r':
1377 if self._repo.dirstate[f] != 'r':
1379 self._repo.ui.warn(_("%s not removed!\n") % f)
1378 self._repo.ui.warn(_("%s not removed!\n") % f)
1380 else:
1379 else:
1381 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1380 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1382 t = fctx.data()
1381 t = fctx.data()
1383 self._repo.wwrite(f, t, fctx.flags())
1382 self._repo.wwrite(f, t, fctx.flags())
1384 self._repo.dirstate.normal(f)
1383 self._repo.dirstate.normal(f)
1385 finally:
1384 finally:
1386 wlock.release()
1385 wlock.release()
1387
1386
1388 def copy(self, source, dest):
1387 def copy(self, source, dest):
1389 try:
1388 try:
1390 st = self._repo.wvfs.lstat(dest)
1389 st = self._repo.wvfs.lstat(dest)
1391 except OSError, err:
1390 except OSError, err:
1392 if err.errno != errno.ENOENT:
1391 if err.errno != errno.ENOENT:
1393 raise
1392 raise
1394 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1393 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1395 return
1394 return
1396 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1395 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1397 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1396 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1398 "symbolic link\n") % dest)
1397 "symbolic link\n") % dest)
1399 else:
1398 else:
1400 wlock = self._repo.wlock()
1399 wlock = self._repo.wlock()
1401 try:
1400 try:
1402 if self._repo.dirstate[dest] in '?':
1401 if self._repo.dirstate[dest] in '?':
1403 self._repo.dirstate.add(dest)
1402 self._repo.dirstate.add(dest)
1404 elif self._repo.dirstate[dest] in 'r':
1403 elif self._repo.dirstate[dest] in 'r':
1405 self._repo.dirstate.normallookup(dest)
1404 self._repo.dirstate.normallookup(dest)
1406 self._repo.dirstate.copy(source, dest)
1405 self._repo.dirstate.copy(source, dest)
1407 finally:
1406 finally:
1408 wlock.release()
1407 wlock.release()
1409
1408
1410 def _filtersuspectsymlink(self, files):
1409 def _filtersuspectsymlink(self, files):
1411 if not files or self._repo.dirstate._checklink:
1410 if not files or self._repo.dirstate._checklink:
1412 return files
1411 return files
1413
1412
1414 # Symlink placeholders may get non-symlink-like contents
1413 # Symlink placeholders may get non-symlink-like contents
1415 # via user error or dereferencing by NFS or Samba servers,
1414 # via user error or dereferencing by NFS or Samba servers,
1416 # so we filter out any placeholders that don't look like a
1415 # so we filter out any placeholders that don't look like a
1417 # symlink
1416 # symlink
1418 sane = []
1417 sane = []
1419 for f in files:
1418 for f in files:
1420 if self.flags(f) == 'l':
1419 if self.flags(f) == 'l':
1421 d = self[f].data()
1420 d = self[f].data()
1422 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1421 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1423 self._repo.ui.debug('ignoring suspect symlink placeholder'
1422 self._repo.ui.debug('ignoring suspect symlink placeholder'
1424 ' "%s"\n' % f)
1423 ' "%s"\n' % f)
1425 continue
1424 continue
1426 sane.append(f)
1425 sane.append(f)
1427 return sane
1426 return sane
1428
1427
1429 def _checklookup(self, files):
1428 def _checklookup(self, files):
1430 # check for any possibly clean files
1429 # check for any possibly clean files
1431 if not files:
1430 if not files:
1432 return [], []
1431 return [], []
1433
1432
1434 modified = []
1433 modified = []
1435 fixup = []
1434 fixup = []
1436 pctx = self._parents[0]
1435 pctx = self._parents[0]
1437 # do a full compare of any files that might have changed
1436 # do a full compare of any files that might have changed
1438 for f in sorted(files):
1437 for f in sorted(files):
1439 if (f not in pctx or self.flags(f) != pctx.flags(f)
1438 if (f not in pctx or self.flags(f) != pctx.flags(f)
1440 or pctx[f].cmp(self[f])):
1439 or pctx[f].cmp(self[f])):
1441 modified.append(f)
1440 modified.append(f)
1442 else:
1441 else:
1443 fixup.append(f)
1442 fixup.append(f)
1444
1443
1445 # update dirstate for files that are actually clean
1444 # update dirstate for files that are actually clean
1446 if fixup:
1445 if fixup:
1447 try:
1446 try:
1448 # updating the dirstate is optional
1447 # updating the dirstate is optional
1449 # so we don't wait on the lock
1448 # so we don't wait on the lock
1450 # wlock can invalidate the dirstate, so cache normal _after_
1449 # wlock can invalidate the dirstate, so cache normal _after_
1451 # taking the lock
1450 # taking the lock
1452 wlock = self._repo.wlock(False)
1451 wlock = self._repo.wlock(False)
1453 normal = self._repo.dirstate.normal
1452 normal = self._repo.dirstate.normal
1454 try:
1453 try:
1455 for f in fixup:
1454 for f in fixup:
1456 normal(f)
1455 normal(f)
1457 finally:
1456 finally:
1458 wlock.release()
1457 wlock.release()
1459 except error.LockError:
1458 except error.LockError:
1460 pass
1459 pass
1461 return modified, fixup
1460 return modified, fixup
1462
1461
1463 def _manifestmatches(self, match, s):
1462 def _manifestmatches(self, match, s):
1464 """Slow path for workingctx
1463 """Slow path for workingctx
1465
1464
1466 The fast path is used when comparing the working directory to its parent;
1465 The fast path is used when comparing the working directory to its parent;
1467 this function handles a comparison with a non-parent, therefore we
1466 this function handles a comparison with a non-parent, therefore we
1468 need to build a manifest and return what matches.
1467 need to build a manifest and return what matches.
1469 """
1468 """
1470 mf = self._repo['.']._manifestmatches(match, s)
1469 mf = self._repo['.']._manifestmatches(match, s)
1471 for f in s.modified + s.added:
1470 for f in s.modified + s.added:
1472 mf[f] = _newnode
1471 mf[f] = _newnode
1473 mf.setflag(f, self.flags(f))
1472 mf.setflag(f, self.flags(f))
1474 for f in s.removed:
1473 for f in s.removed:
1475 if f in mf:
1474 if f in mf:
1476 del mf[f]
1475 del mf[f]
1477 return mf
1476 return mf
1478
1477
1479 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1478 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1480 unknown=False):
1479 unknown=False):
1481 '''Gets the status from the dirstate -- internal use only.'''
1480 '''Gets the status from the dirstate -- internal use only.'''
1482 listignored, listclean, listunknown = ignored, clean, unknown
1481 listignored, listclean, listunknown = ignored, clean, unknown
1483 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1482 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1484 subrepos = []
1483 subrepos = []
1485 if '.hgsub' in self:
1484 if '.hgsub' in self:
1486 subrepos = sorted(self.substate)
1485 subrepos = sorted(self.substate)
1487 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1486 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1488 listclean, listunknown)
1487 listclean, listunknown)
1489
1488
1490 # check for any possibly clean files
1489 # check for any possibly clean files
1491 if cmp:
1490 if cmp:
1492 modified2, fixup = self._checklookup(cmp)
1491 modified2, fixup = self._checklookup(cmp)
1493 s.modified.extend(modified2)
1492 s.modified.extend(modified2)
1494
1493
1495 # update dirstate for files that are actually clean
1494 # update dirstate for files that are actually clean
1496 if fixup and listclean:
1495 if fixup and listclean:
1497 s.clean.extend(fixup)
1496 s.clean.extend(fixup)
1498
1497
1499 return s
1498 return s
1500
1499
1501 def _buildstatus(self, other, s, match, listignored, listclean,
1500 def _buildstatus(self, other, s, match, listignored, listclean,
1502 listunknown):
1501 listunknown):
1503 """build a status with respect to another context
1502 """build a status with respect to another context
1504
1503
1505 This includes logic for maintaining the fast path of status when
1504 This includes logic for maintaining the fast path of status when
1506 comparing the working directory against its parent: building a new
1505 comparing the working directory against its parent: building a new
1507 manifest is skipped unless self (the working directory) is compared
1506 manifest is skipped unless self (the working directory) is compared
1508 against something other than its parent (repo['.']).
1507 against something other than its parent (repo['.']).
1509 """
1508 """
1510 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1509 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1511 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1510 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1512 # might have accidentally ended up with the entire contents of the file
1511 # might have accidentally ended up with the entire contents of the file
1513 # they are supposed to be linking to.
1512 # they are supposed to be linking to.
1514 s.modified[:] = self._filtersuspectsymlink(s.modified)
1513 s.modified[:] = self._filtersuspectsymlink(s.modified)
1515 if other != self._repo['.']:
1514 if other != self._repo['.']:
1516 s = super(workingctx, self)._buildstatus(other, s, match,
1515 s = super(workingctx, self)._buildstatus(other, s, match,
1517 listignored, listclean,
1516 listignored, listclean,
1518 listunknown)
1517 listunknown)
1519 elif match.always():
1518 elif match.always():
1520 # cache for performance
1519 # cache for performance
1521 if s.unknown or s.ignored or s.clean:
1520 if s.unknown or s.ignored or s.clean:
1522 # "_status" is cached with list*=False in the normal route
1521 # "_status" is cached with list*=False in the normal route
1523 self._status = scmutil.status(s.modified, s.added, s.removed,
1522 self._status = scmutil.status(s.modified, s.added, s.removed,
1524 s.deleted, [], [], [])
1523 s.deleted, [], [], [])
1525 else:
1524 else:
1526 self._status = s
1525 self._status = s
1527 return s
1526 return s
1528
1527
1529 def _matchstatus(self, other, match):
1528 def _matchstatus(self, other, match):
1530 """override the match method with a filter for directory patterns
1529 """override the match method with a filter for directory patterns
1531
1530
1532 We use inheritance to customize the match.bad method only in cases of
1531 We use inheritance to customize the match.bad method only in cases of
1533 workingctx since it belongs only to the working directory when
1532 workingctx since it belongs only to the working directory when
1534 comparing against the parent changeset.
1533 comparing against the parent changeset.
1535
1534
1536 If we aren't comparing against the working directory's parent, then we
1535 If we aren't comparing against the working directory's parent, then we
1537 just use the default match object sent to us.
1536 just use the default match object sent to us.
1538 """
1537 """
1539 superself = super(workingctx, self)
1538 superself = super(workingctx, self)
1540 match = superself._matchstatus(other, match)
1539 match = superself._matchstatus(other, match)
1541 if other != self._repo['.']:
1540 if other != self._repo['.']:
1542 def bad(f, msg):
1541 def bad(f, msg):
1543 # 'f' may be a directory pattern from 'match.files()',
1542 # 'f' may be a directory pattern from 'match.files()',
1544 # so 'f not in ctx1' is not enough
1543 # so 'f not in ctx1' is not enough
1545 if f not in other and f not in other.dirs():
1544 if f not in other and f not in other.dirs():
1546 self._repo.ui.warn('%s: %s\n' %
1545 self._repo.ui.warn('%s: %s\n' %
1547 (self._repo.dirstate.pathto(f), msg))
1546 (self._repo.dirstate.pathto(f), msg))
1548 match.bad = bad
1547 match.bad = bad
1549 return match
1548 return match
1550
1549
1551 class committablefilectx(basefilectx):
1550 class committablefilectx(basefilectx):
1552 """A committablefilectx provides common functionality for a file context
1551 """A committablefilectx provides common functionality for a file context
1553 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1552 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1554 def __init__(self, repo, path, filelog=None, ctx=None):
1553 def __init__(self, repo, path, filelog=None, ctx=None):
1555 self._repo = repo
1554 self._repo = repo
1556 self._path = path
1555 self._path = path
1557 self._changeid = None
1556 self._changeid = None
1558 self._filerev = self._filenode = None
1557 self._filerev = self._filenode = None
1559
1558
1560 if filelog is not None:
1559 if filelog is not None:
1561 self._filelog = filelog
1560 self._filelog = filelog
1562 if ctx:
1561 if ctx:
1563 self._changectx = ctx
1562 self._changectx = ctx
1564
1563
1565 def __nonzero__(self):
1564 def __nonzero__(self):
1566 return True
1565 return True
1567
1566
1568 def parents(self):
1567 def parents(self):
1569 '''return parent filectxs, following copies if necessary'''
1568 '''return parent filectxs, following copies if necessary'''
1570 def filenode(ctx, path):
1569 def filenode(ctx, path):
1571 return ctx._manifest.get(path, nullid)
1570 return ctx._manifest.get(path, nullid)
1572
1571
1573 path = self._path
1572 path = self._path
1574 fl = self._filelog
1573 fl = self._filelog
1575 pcl = self._changectx._parents
1574 pcl = self._changectx._parents
1576 renamed = self.renamed()
1575 renamed = self.renamed()
1577
1576
1578 if renamed:
1577 if renamed:
1579 pl = [renamed + (None,)]
1578 pl = [renamed + (None,)]
1580 else:
1579 else:
1581 pl = [(path, filenode(pcl[0], path), fl)]
1580 pl = [(path, filenode(pcl[0], path), fl)]
1582
1581
1583 for pc in pcl[1:]:
1582 for pc in pcl[1:]:
1584 pl.append((path, filenode(pc, path), fl))
1583 pl.append((path, filenode(pc, path), fl))
1585
1584
1586 return [filectx(self._repo, p, fileid=n, filelog=l)
1585 return [filectx(self._repo, p, fileid=n, filelog=l)
1587 for p, n, l in pl if n != nullid]
1586 for p, n, l in pl if n != nullid]
1588
1587
1589 def children(self):
1588 def children(self):
1590 return []
1589 return []
1591
1590
1592 class workingfilectx(committablefilectx):
1591 class workingfilectx(committablefilectx):
1593 """A workingfilectx object makes access to data related to a particular
1592 """A workingfilectx object makes access to data related to a particular
1594 file in the working directory convenient."""
1593 file in the working directory convenient."""
1595 def __init__(self, repo, path, filelog=None, workingctx=None):
1594 def __init__(self, repo, path, filelog=None, workingctx=None):
1596 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1595 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1597
1596
1598 @propertycache
1597 @propertycache
1599 def _changectx(self):
1598 def _changectx(self):
1600 return workingctx(self._repo)
1599 return workingctx(self._repo)
1601
1600
1602 def data(self):
1601 def data(self):
1603 return self._repo.wread(self._path)
1602 return self._repo.wread(self._path)
1604 def renamed(self):
1603 def renamed(self):
1605 rp = self._repo.dirstate.copied(self._path)
1604 rp = self._repo.dirstate.copied(self._path)
1606 if not rp:
1605 if not rp:
1607 return None
1606 return None
1608 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1607 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1609
1608
1610 def size(self):
1609 def size(self):
1611 return self._repo.wvfs.lstat(self._path).st_size
1610 return self._repo.wvfs.lstat(self._path).st_size
1612 def date(self):
1611 def date(self):
1613 t, tz = self._changectx.date()
1612 t, tz = self._changectx.date()
1614 try:
1613 try:
1615 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1614 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1616 except OSError, err:
1615 except OSError, err:
1617 if err.errno != errno.ENOENT:
1616 if err.errno != errno.ENOENT:
1618 raise
1617 raise
1619 return (t, tz)
1618 return (t, tz)
1620
1619
1621 def cmp(self, fctx):
1620 def cmp(self, fctx):
1622 """compare with other file context
1621 """compare with other file context
1623
1622
1624 returns True if different than fctx.
1623 returns True if different than fctx.
1625 """
1624 """
1626 # fctx should be a filectx (not a workingfilectx)
1625 # fctx should be a filectx (not a workingfilectx)
1627 # invert comparison to reuse the same code path
1626 # invert comparison to reuse the same code path
1628 return fctx.cmp(self)
1627 return fctx.cmp(self)
1629
1628
1630 def remove(self, ignoremissing=False):
1629 def remove(self, ignoremissing=False):
1631 """wraps unlink for a repo's working directory"""
1630 """wraps unlink for a repo's working directory"""
1632 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1631 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1633
1632
1634 def write(self, data, flags):
1633 def write(self, data, flags):
1635 """wraps repo.wwrite"""
1634 """wraps repo.wwrite"""
1636 self._repo.wwrite(self._path, data, flags)
1635 self._repo.wwrite(self._path, data, flags)
1637
1636
1638 class workingcommitctx(workingctx):
1637 class workingcommitctx(workingctx):
1639 """A workingcommitctx object makes access to data related to
1638 """A workingcommitctx object makes access to data related to
1640 the revision being committed convenient.
1639 the revision being committed convenient.
1641
1640
1642 This hides changes in the working directory, if they aren't
1641 This hides changes in the working directory, if they aren't
1643 committed in this context.
1642 committed in this context.
1644 """
1643 """
1645 def __init__(self, repo, changes,
1644 def __init__(self, repo, changes,
1646 text="", user=None, date=None, extra=None):
1645 text="", user=None, date=None, extra=None):
1647 super(workingctx, self).__init__(repo, text, user, date, extra,
1646 super(workingctx, self).__init__(repo, text, user, date, extra,
1648 changes)
1647 changes)
1649
1648
1650 def _buildstatus(self, other, s, match,
1649 def _buildstatus(self, other, s, match,
1651 listignored, listclean, listunknown):
1650 listignored, listclean, listunknown):
1652 """Prevent ``workingctx._buildstatus`` from changing ``self._status``
1651 """Prevent ``workingctx._buildstatus`` from changing ``self._status``
1653 """
1652 """
1654 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1653 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1655 if other != self._repo['.']:
1654 if other != self._repo['.']:
1656 # workingctx._buildstatus doesn't change self._status in this case
1655 # workingctx._buildstatus doesn't change self._status in this case
1657 superself = super(workingcommitctx, self)
1656 superself = super(workingcommitctx, self)
1658 s = superself._buildstatus(other, s, match,
1657 s = superself._buildstatus(other, s, match,
1659 listignored, listclean, listunknown)
1658 listignored, listclean, listunknown)
1660 return s
1659 return s
1661
1660
1662 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1661 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1663 unknown=False):
1662 unknown=False):
1664 """Return matched files only in ``self._status``
1663 """Return matched files only in ``self._status``
1665
1664
1666 Uncommitted files appear "clean" via this context, even if
1665 Uncommitted files appear "clean" via this context, even if
1667 they aren't actually so in the working directory.
1666 they aren't actually so in the working directory.
1668 """
1667 """
1669 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1668 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1670 if clean:
1669 if clean:
1671 clean = [f for f in self._manifest if f not in self._changedset]
1670 clean = [f for f in self._manifest if f not in self._changedset]
1672 else:
1671 else:
1673 clean = []
1672 clean = []
1674 return scmutil.status([f for f in self._status.modified if match(f)],
1673 return scmutil.status([f for f in self._status.modified if match(f)],
1675 [f for f in self._status.added if match(f)],
1674 [f for f in self._status.added if match(f)],
1676 [f for f in self._status.removed if match(f)],
1675 [f for f in self._status.removed if match(f)],
1677 [], [], [], clean)
1676 [], [], [], clean)
1678
1677
1679 @propertycache
1678 @propertycache
1680 def _changedset(self):
1679 def _changedset(self):
1681 """Return the set of files changed in this context
1680 """Return the set of files changed in this context
1682 """
1681 """
1683 changed = set(self._status.modified)
1682 changed = set(self._status.modified)
1684 changed.update(self._status.added)
1683 changed.update(self._status.added)
1685 changed.update(self._status.removed)
1684 changed.update(self._status.removed)
1686 return changed
1685 return changed
1687
1686
1688 class memctx(committablectx):
1687 class memctx(committablectx):
1689 """Use memctx to perform in-memory commits via localrepo.commitctx().
1688 """Use memctx to perform in-memory commits via localrepo.commitctx().
1690
1689
1691 Revision information is supplied at initialization time, while
1690 Revision information is supplied at initialization time, while
1692 related file data is made available through a callback
1691 related file data is made available through a callback
1693 mechanism. 'repo' is the current localrepo, 'parents' is a
1692 mechanism. 'repo' is the current localrepo, 'parents' is a
1694 sequence of two parent revisions identifiers (pass None for every
1693 sequence of two parent revisions identifiers (pass None for every
1695 missing parent), 'text' is the commit message and 'files' lists
1694 missing parent), 'text' is the commit message and 'files' lists
1696 names of files touched by the revision (normalized and relative to
1695 names of files touched by the revision (normalized and relative to
1697 repository root).
1696 repository root).
1698
1697
1699 filectxfn(repo, memctx, path) is a callable receiving the
1698 filectxfn(repo, memctx, path) is a callable receiving the
1700 repository, the current memctx object and the normalized path of
1699 repository, the current memctx object and the normalized path of
1701 requested file, relative to repository root. It is fired by the
1700 requested file, relative to repository root. It is fired by the
1702 commit function for every file in 'files', but the call order is
1701 commit function for every file in 'files', but the call order is
1703 undefined. If the file is available in the revision being
1702 undefined. If the file is available in the revision being
1704 committed (updated or added), filectxfn returns a memfilectx
1703 committed (updated or added), filectxfn returns a memfilectx
1705 object. If the file was removed, filectxfn raises an
1704 object. If the file was removed, filectxfn raises an
1706 IOError. Moved files are represented by marking the source file
1705 IOError. Moved files are represented by marking the source file
1707 removed and the new file added with copy information (see
1706 removed and the new file added with copy information (see
1708 memfilectx).
1707 memfilectx).
1709
1708
1710 user receives the committer name and defaults to current
1709 user receives the committer name and defaults to current
1711 repository username, date is the commit date in any format
1710 repository username, date is the commit date in any format
1712 supported by util.parsedate() and defaults to current date, extra
1711 supported by util.parsedate() and defaults to current date, extra
1713 is a dictionary of metadata or is left empty.
1712 is a dictionary of metadata or is left empty.
1714 """
1713 """
1715
1714
1716 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1715 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1717 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1716 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1718 # this field to determine what to do in filectxfn.
1717 # this field to determine what to do in filectxfn.
1719 _returnnoneformissingfiles = True
1718 _returnnoneformissingfiles = True
1720
1719
1721 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1720 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1722 date=None, extra=None, editor=False):
1721 date=None, extra=None, editor=False):
1723 super(memctx, self).__init__(repo, text, user, date, extra)
1722 super(memctx, self).__init__(repo, text, user, date, extra)
1724 self._rev = None
1723 self._rev = None
1725 self._node = None
1724 self._node = None
1726 parents = [(p or nullid) for p in parents]
1725 parents = [(p or nullid) for p in parents]
1727 p1, p2 = parents
1726 p1, p2 = parents
1728 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1727 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1729 files = sorted(set(files))
1728 files = sorted(set(files))
1730 self._files = files
1729 self._files = files
1731 self.substate = {}
1730 self.substate = {}
1732
1731
1733 # if store is not callable, wrap it in a function
1732 # if store is not callable, wrap it in a function
1734 if not callable(filectxfn):
1733 if not callable(filectxfn):
1735 def getfilectx(repo, memctx, path):
1734 def getfilectx(repo, memctx, path):
1736 fctx = filectxfn[path]
1735 fctx = filectxfn[path]
1737 # this is weird but apparently we only keep track of one parent
1736 # this is weird but apparently we only keep track of one parent
1738 # (why not only store that instead of a tuple?)
1737 # (why not only store that instead of a tuple?)
1739 copied = fctx.renamed()
1738 copied = fctx.renamed()
1740 if copied:
1739 if copied:
1741 copied = copied[0]
1740 copied = copied[0]
1742 return memfilectx(repo, path, fctx.data(),
1741 return memfilectx(repo, path, fctx.data(),
1743 islink=fctx.islink(), isexec=fctx.isexec(),
1742 islink=fctx.islink(), isexec=fctx.isexec(),
1744 copied=copied, memctx=memctx)
1743 copied=copied, memctx=memctx)
1745 self._filectxfn = getfilectx
1744 self._filectxfn = getfilectx
1746 else:
1745 else:
1747 # "util.cachefunc" reduces invocation of possibly expensive
1746 # "util.cachefunc" reduces invocation of possibly expensive
1748 # "filectxfn" for performance (e.g. converting from another VCS)
1747 # "filectxfn" for performance (e.g. converting from another VCS)
1749 self._filectxfn = util.cachefunc(filectxfn)
1748 self._filectxfn = util.cachefunc(filectxfn)
1750
1749
1751 self._extra = extra and extra.copy() or {}
1750 self._extra = extra and extra.copy() or {}
1752 if self._extra.get('branch', '') == '':
1751 if self._extra.get('branch', '') == '':
1753 self._extra['branch'] = 'default'
1752 self._extra['branch'] = 'default'
1754
1753
1755 if editor:
1754 if editor:
1756 self._text = editor(self._repo, self, [])
1755 self._text = editor(self._repo, self, [])
1757 self._repo.savecommitmessage(self._text)
1756 self._repo.savecommitmessage(self._text)
1758
1757
1759 def filectx(self, path, filelog=None):
1758 def filectx(self, path, filelog=None):
1760 """get a file context from the working directory
1759 """get a file context from the working directory
1761
1760
1762 Returns None if file doesn't exist and should be removed."""
1761 Returns None if file doesn't exist and should be removed."""
1763 return self._filectxfn(self._repo, self, path)
1762 return self._filectxfn(self._repo, self, path)
1764
1763
1765 def commit(self):
1764 def commit(self):
1766 """commit context to the repo"""
1765 """commit context to the repo"""
1767 return self._repo.commitctx(self)
1766 return self._repo.commitctx(self)
1768
1767
1769 @propertycache
1768 @propertycache
1770 def _manifest(self):
1769 def _manifest(self):
1771 """generate a manifest based on the return values of filectxfn"""
1770 """generate a manifest based on the return values of filectxfn"""
1772
1771
1773 # keep this simple for now; just worry about p1
1772 # keep this simple for now; just worry about p1
1774 pctx = self._parents[0]
1773 pctx = self._parents[0]
1775 man = pctx.manifest().copy()
1774 man = pctx.manifest().copy()
1776
1775
1777 for f in self._status.modified:
1776 for f in self._status.modified:
1778 p1node = nullid
1777 p1node = nullid
1779 p2node = nullid
1778 p2node = nullid
1780 p = pctx[f].parents() # if file isn't in pctx, check p2?
1779 p = pctx[f].parents() # if file isn't in pctx, check p2?
1781 if len(p) > 0:
1780 if len(p) > 0:
1782 p1node = p[0].node()
1781 p1node = p[0].node()
1783 if len(p) > 1:
1782 if len(p) > 1:
1784 p2node = p[1].node()
1783 p2node = p[1].node()
1785 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1784 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1786
1785
1787 for f in self._status.added:
1786 for f in self._status.added:
1788 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1787 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1789
1788
1790 for f in self._status.removed:
1789 for f in self._status.removed:
1791 if f in man:
1790 if f in man:
1792 del man[f]
1791 del man[f]
1793
1792
1794 return man
1793 return man
1795
1794
1796 @propertycache
1795 @propertycache
1797 def _status(self):
1796 def _status(self):
1798 """Calculate exact status from ``files`` specified at construction
1797 """Calculate exact status from ``files`` specified at construction
1799 """
1798 """
1800 man1 = self.p1().manifest()
1799 man1 = self.p1().manifest()
1801 p2 = self._parents[1]
1800 p2 = self._parents[1]
1802 # "1 < len(self._parents)" can't be used for checking
1801 # "1 < len(self._parents)" can't be used for checking
1803 # existence of the 2nd parent, because "memctx._parents" is
1802 # existence of the 2nd parent, because "memctx._parents" is
1804 # explicitly initialized with a list whose length is 2.
1803 # explicitly initialized with a list whose length is 2.
1805 if p2.node() != nullid:
1804 if p2.node() != nullid:
1806 man2 = p2.manifest()
1805 man2 = p2.manifest()
1807 managing = lambda f: f in man1 or f in man2
1806 managing = lambda f: f in man1 or f in man2
1808 else:
1807 else:
1809 managing = lambda f: f in man1
1808 managing = lambda f: f in man1
1810
1809
1811 modified, added, removed = [], [], []
1810 modified, added, removed = [], [], []
1812 for f in self._files:
1811 for f in self._files:
1813 if not managing(f):
1812 if not managing(f):
1814 added.append(f)
1813 added.append(f)
1815 elif self[f]:
1814 elif self[f]:
1816 modified.append(f)
1815 modified.append(f)
1817 else:
1816 else:
1818 removed.append(f)
1817 removed.append(f)
1819
1818
1820 return scmutil.status(modified, added, removed, [], [], [], [])
1819 return scmutil.status(modified, added, removed, [], [], [], [])
1821
1820
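Tying the memctx docstring above to the memfilectx class defined just below, here is a hedged usage sketch of an in-memory commit. It relies only on the constructors shown in this file plus hg.repository/ui.ui as the usual 3.x-era entry points; the repository path, file name, and contents are placeholders, so treat this as a sketch rather than canonical usage:

from mercurial import ui as uimod, hg, context

repo = hg.repository(uimod.ui(), '/path/to/repo')    # placeholder path

def filectxfn(repo, mctx, path):
    # called once per entry in 'files'; return the file's new contents
    return context.memfilectx(repo, path, 'hello from memctx\n',
                              islink=False, isexec=False, memctx=mctx)

mctx = context.memctx(repo,
                      [repo['.'].node(), None],       # p1, no p2
                      'commit built in memory',
                      ['hello.txt'],
                      filectxfn,
                      user='example <example@example.com>')
node = mctx.commit()                                  # == repo.commitctx(mctx)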
1822 class memfilectx(committablefilectx):
1821 class memfilectx(committablefilectx):
1823 """memfilectx represents an in-memory file to commit.
1822 """memfilectx represents an in-memory file to commit.
1824
1823
1825 See memctx and committablefilectx for more details.
1824 See memctx and committablefilectx for more details.
1826 """
1825 """
1827 def __init__(self, repo, path, data, islink=False,
1826 def __init__(self, repo, path, data, islink=False,
1828 isexec=False, copied=None, memctx=None):
1827 isexec=False, copied=None, memctx=None):
1829 """
1828 """
1830 path is the normalized file path relative to repository root.
1829 path is the normalized file path relative to repository root.
1831 data is the file content as a string.
1830 data is the file content as a string.
1832 islink is True if the file is a symbolic link.
1831 islink is True if the file is a symbolic link.
1833 isexec is True if the file is executable.
1832 isexec is True if the file is executable.
1834 copied is the source file path if current file was copied in the
1833 copied is the source file path if current file was copied in the
1835 revision being committed, or None."""
1834 revision being committed, or None."""
1836 super(memfilectx, self).__init__(repo, path, None, memctx)
1835 super(memfilectx, self).__init__(repo, path, None, memctx)
1837 self._data = data
1836 self._data = data
1838 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1837 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1839 self._copied = None
1838 self._copied = None
1840 if copied:
1839 if copied:
1841 self._copied = (copied, nullid)
1840 self._copied = (copied, nullid)
1842
1841
1843 def data(self):
1842 def data(self):
1844 return self._data
1843 return self._data
1845 def size(self):
1844 def size(self):
1846 return len(self.data())
1845 return len(self.data())
1847 def flags(self):
1846 def flags(self):
1848 return self._flags
1847 return self._flags
1849 def renamed(self):
1848 def renamed(self):
1850 return self._copied
1849 return self._copied
1851
1850
1852 def remove(self, ignoremissing=False):
1851 def remove(self, ignoremissing=False):
1853 """wraps unlink for a repo's working directory"""
1852 """wraps unlink for a repo's working directory"""
1854 # need to figure out what to do here
1853 # need to figure out what to do here
1855 del self._changectx[self._path]
1854 del self._changectx[self._path]
1856
1855
1857 def write(self, data, flags):
1856 def write(self, data, flags):
1858 """wraps repo.wwrite"""
1857 """wraps repo.wwrite"""
1859 self._data = data
1858 self._data = data