status: don't override _buildstatus() in workingcommitctx...
Martin von Zweigbergk
r23777:a4951ade default
@@ -1,1859 +1,1847 @@
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import nullid, nullrev, short, hex, bin
from i18n import _
import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
import match as matchmod
import os, errno, stat
import obsolete as obsmod
import repoview
import fileset
import revlog

propertycache = util.propertycache

# Phony node value to stand-in for new files in some uses of
# manifests. Manifests support 21-byte hashes for nodes which are
# dirty in the working copy.
_newnode = '!' * 21

def _adjustlinkrev(repo, path, filelog, fnode, srcrev, inclusive=False):
    """return the first ancestor of <srcrev> introducing <fnode>

    If the linkrev of the file revision does not point to an ancestor of
    srcrev, we'll walk down the ancestors until we find one introducing this
    file revision.

    :repo: a localrepository object (used to access changelog and manifest)
    :path: the file path
    :fnode: the nodeid of the file revision
    :filelog: the filelog of this path
    :srcrev: the changeset revision we search ancestors from
    :inclusive: if true, the src revision will also be checked
    """
    cl = repo.unfiltered().changelog
    ma = repo.manifest
    # fetch the linkrev
    fr = filelog.rev(fnode)
    lkr = filelog.linkrev(fr)
    # check if this linkrev is an ancestor of srcrev
    anc = cl.ancestors([srcrev], lkr, inclusive=inclusive)
    if lkr not in anc:
        for a in anc:
            ac = cl.read(a) # get changeset data (we avoid object creation).
            if path in ac[3]: # checking the 'files' field.
                # The file has been touched, check if the content is similar
                # to the one we search for.
                if fnode == ma.readdelta(ac[0]).get(path):
                    return a
        # In theory, we should never get out of that loop without a result.
        # But if the manifest uses a buggy file revision (not a child of the
        # one it replaces), we could. Such a buggy situation will likely
        # result in a crash somewhere else at some point.
    return lkr

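# Illustrative sketch (not part of the original file; names are assumed from
# an interactive session): resolving linkrev-shadowing for a file revision.
#
#   fl = repo.file('foo')                 # filelog for path 'foo'
#   fnode = fl.node(0)                    # some file revision node
#   # first ancestor of changeset rev 42 that introduced fnode; falls back
#   # to the stored linkrev if the ancestor walk finds nothing:
#   rev = _adjustlinkrev(repo, 'foo', fl, fnode, 42, inclusive=True)
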
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        for f in sorted(self._manifest):
            yield f

    def _manifestmatches(self, match, s):
        """generate a new manifest filtered by the match argument

        This method is for internal use only and mainly exists to provide an
        object oriented way for other contexts to customize the manifest
        generation.
        """
        return self.manifest().matches(match)

    def _matchstatus(self, other, match):
        """return match.always if match is None

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        if self.rev() is not None and self.rev() < other.rev():
            self.manifest()
        mf1 = other._manifestmatches(match, s)
        mf2 = self._manifestmatches(match, s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif node2 != _newnode:
                # The file was not a new file in mf2, so an entry
                # from diff is really a difference.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                # node2 was newnode, but the working file doesn't
                # match the one in mf1.
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1]
            ignored = [fn for fn in ignored if fn not in mf1]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

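    # Illustrative sketch (assumption, not part of the original file): the
    # shape of the manifest diff consumed above. mf1.diff(mf2, clean=True)
    # maps each differing (or clean) filename to None (clean) or to a pair
    # ((n1, fl1), (n2, fl2)) of nodeid/flag tuples, with None/'' standing in
    # for a side where the file does not exist:
    #
    #   {'a.txt': ((n1, ''), (n2, 'x')),      # changed (here also +x flag)
    #    'b.txt': ((None, ''), (n2, '')),     # only in mf2 -> added
    #    'c.txt': ((n1, ''), (None, '')),     # only in mf1 -> removed
    #    'd.txt': None}                       # clean (only with clean=True)
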
    @propertycache
    def substate(self):
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but its ancestors are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successor of a changeset with multiple possible successors sets

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changeset.

        Troubles are returned as strings. Possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        if len(self._parents) == 2:
            return self._parents[1]
        return changectx(self._repo, -1)

    def _fileinfo(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        node, flag = self._repo.manifest.find(self._changeset[0], path)
        if not node:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path):
        return subrepo.subrepo(self, path)

    def match(self, pats=[], include=None, exclude=None, default='glob'):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.auditor, ctx=self)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    @propertycache
    def _dirs(self):
        return scmutil.dirs(self._manifest)

    def dirs(self):
        return self._dirs

    def dirty(self, missing=False, merge=True, branch=True):
        return False

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                rev2 = ctx2.subrev(subpath)
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self._repo.ui.status(_("skipping missing "
                                           "subrepository: %s\n") % subpath)

        for l in r:
            l.sort()

        return r


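# Illustrative sketch (assumption, not part of the original file): how client
# code typically reaches the status machinery above, comparing the working
# directory context with its first parent:
#
#   wctx = repo[None]
#   st = wctx.status(listclean=True)
#   # st is a scmutil.status tuple: modified, added, removed, deleted,
#   # unknown, ignored, clean
#   for f in st.modified:
#       ui.write("M %s\n" % f)
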
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None):
    def getfilectx(repo, memctx, path):
        data, mode, copied = store.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)
    extra = {}
    if branch:
        extra['branch'] = encoding.fromlocal(branch)
    ctx = memctx(repo, parents, text, files, getfilectx, user,
                 date, extra, editor)
    return ctx

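# Illustrative sketch (assumption, not part of the original file): building an
# in-memory commit from a file store such as patch.filestore(), then
# committing it without touching the working directory:
#
#   store = patch.filestore()
#   store.setfile('foo', 'new contents\n', (False, False))
#   mctx = makememctx(repo, (repo['.'].node(), nullid), "commit message",
#                     "user <user@example.com>", None, None, ['foo'], store)
#   newnode = repo.commitctx(mctx)
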
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == '.':
                self._node = repo.dirstate.p1()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                r = int(changeid)
                if str(r) != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                if len(changeid) == 20:
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            if repo.filtername == 'visible':
                msg = _("hidden revision '%s'") % changeid
                hint = _('use --hidden to access hidden revisions')
                raise error.FilteredRepoLookupError(msg, hint=hint)
            msg = _("filtered revision '%s' (not in '%s' subset)")
            msg %= (changeid, repo.filtername)
            raise error.FilteredRepoLookupError(msg)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

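    # Illustrative sketch (assumption, not part of the original file): the
    # lookup chain above means all of the following resolve to a changectx:
    #
    #   repo[0]                  # integer revision number
    #   repo['.']                # working directory parent
    #   repo['tip'], repo['null']
    #   repo['a4951ade']         # hex prefix, via _partialmatch
    #   repo['mybookmark']       # bookmarks/tags/etc. via the name interface
    #
    # Unknown or filtered identifiers raise RepoLookupError or
    # FilteredRepoLookupError as implemented at the end of __init__.
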
    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    @propertycache
    def _changeset(self):
        return self._repo.changelog.read(self.rev())

    @propertycache
    def _manifest(self):
        return self._repo.manifest.read(self._changeset[0])

    @propertycache
    def _manifestdelta(self):
        return self._repo.manifest.readdelta(self._changeset[0])

    @propertycache
    def _parents(self):
        p = self._repo.changelog.parentrevs(self._rev)
        if p[1] == nullrev:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def changeset(self):
        return self._changeset
    def manifestnode(self):
        return self._changeset[0]

    def user(self):
        return self._changeset[1]
    def date(self):
        return self._changeset[2]
    def files(self):
        return self._changeset[3]
    def description(self):
        return self._changeset[4]
    def branch(self):
        return encoding.tolocal(self._changeset[5].get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset[5]
    def extra(self):
        return self._changeset[5]
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

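    # Illustrative sketch (assumption, not part of the original file): picking
    # a merge base when the common-ancestor heads are ambiguous.
    #
    #   base = repo['foo'].ancestor(repo['bar'], warn=True)
    #
    # With several candidates, the first revision listed in the
    # "merge.preferancestor" config that is one of the common-ancestor heads
    # wins; otherwise the revlog ancestor is used and a note is printed.
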
    def descendant(self, other):
        """True if other is a descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        fset = set(match.files())
        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        # avoid the entire walk if we're only looking for specific files
        if fset and not match.anypats():
            if util.all([fn in self for fn in fset]):
                for fn in sorted(fset):
                    if match(fn):
                        yield fn
                raise StopIteration

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn
        for fn in sorted(fset):
            if fn in self._dirs:
                # specified pattern is a directory
                continue
            match.bad(fn, _('no such file in rev %s') % self)

    def matches(self, match):
        return self.walk(match)

class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory."""
    def __new__(cls, repo, path, *args, **kwargs):
        return super(basefilectx, cls).__new__(cls)

    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if '_changeid' in self.__dict__:
            return self._changeid
        elif '_changectx' in self.__dict__:
            return self._changectx.rev()
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    def __str__(self):
        return "%s@%s" % (self.path(), self._changectx)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    def flags(self):
        return self._changectx.flags(self._path)
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return util.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def cmp(self, fctx):
        """compare with other file context

        returns True if different from fctx.
        """
        if (fctx._filerev is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account
        the changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return _adjustlinkrev(self._repo, self._path, self._filelog,
                              self._filenode, self.rev(), inclusive=True)

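    # Illustrative sketch (assumption, not part of the original file): why
    # introrev() can differ from linkrev(). If the same file revision is
    # reused by two changesets, the shared filelog entry's linkrev points at
    # whichever changeset was added first:
    #
    #   fctx = repo[5]['foo']
    #   fctx.linkrev()   # may be 2, the first changeset that stored this text
    #   fctx.introrev()  # walks ancestors of rev 5 and returns the ancestor
    #                    # that actually introduced this file revision
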
    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid, pl is
            #   empty.
            # - In case of merge, only one of the parents is nullid and should
            #   be replaced with the rename information. This parent is
            #   -always- the first one.
            #
            # As nullid has always been filtered out in the previous list
            # comprehension, inserting at 0 will always result in replacing
            # the first nullid parent with the rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        ret = []
        for path, fnode, l in pl:
            if '_changeid' in vars(self) or '_changectx' in vars(self):
                # If self is associated with a changeset (probably explicitly
                # fed), ensure the created filectx is associated with a
                # changeset that is an ancestor of self.changectx.
                rev = _adjustlinkrev(self._repo, path, l, fnode, self.rev())
                fctx = filectx(self._repo, path, fileid=fnode, filelog=l,
                               changeid=rev)
            else:
                fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
            ret.append(fctx)
        return ret

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

842 def annotate(self, follow=False, linenumber=None, diffopts=None):
842 def annotate(self, follow=False, linenumber=None, diffopts=None):
843 '''returns a list of tuples of (ctx, line) for each line
843 '''returns a list of tuples of (ctx, line) for each line
844 in the file, where ctx is the filectx of the node where
844 in the file, where ctx is the filectx of the node where
845 that line was last changed.
845 that line was last changed.
846 This returns tuples of ((ctx, linenumber), line) for each line,
846 This returns tuples of ((ctx, linenumber), line) for each line,
847 if "linenumber" parameter is NOT "None".
847 if "linenumber" parameter is NOT "None".
848 In such tuples, linenumber means one at the first appearance
848 In such tuples, linenumber means one at the first appearance
849 in the managed file.
849 in the managed file.
850 To reduce annotation cost,
850 To reduce annotation cost,
851 this returns fixed value(False is used) as linenumber,
851 this returns fixed value(False is used) as linenumber,
852 if "linenumber" parameter is "False".'''
852 if "linenumber" parameter is "False".'''
853
853
854 if linenumber is None:
854 if linenumber is None:
855 def decorate(text, rev):
855 def decorate(text, rev):
856 return ([rev] * len(text.splitlines()), text)
856 return ([rev] * len(text.splitlines()), text)
857 elif linenumber:
857 elif linenumber:
858 def decorate(text, rev):
858 def decorate(text, rev):
859 size = len(text.splitlines())
859 size = len(text.splitlines())
860 return ([(rev, i) for i in xrange(1, size + 1)], text)
860 return ([(rev, i) for i in xrange(1, size + 1)], text)
861 else:
861 else:
862 def decorate(text, rev):
862 def decorate(text, rev):
863 return ([(rev, False)] * len(text.splitlines()), text)
863 return ([(rev, False)] * len(text.splitlines()), text)
864
864
865 def pair(parent, child):
865 def pair(parent, child):
866 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
866 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
867 refine=True)
867 refine=True)
868 for (a1, a2, b1, b2), t in blocks:
868 for (a1, a2, b1, b2), t in blocks:
869 # Changed blocks ('!') or blocks made only of blank lines ('~')
869 # Changed blocks ('!') or blocks made only of blank lines ('~')
870 # belong to the child.
870 # belong to the child.
871 if t == '=':
871 if t == '=':
872 child[0][b1:b2] = parent[0][a1:a2]
872 child[0][b1:b2] = parent[0][a1:a2]
873 return child
873 return child
874
874
875 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
875 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
876
876
877 def parents(f):
877 def parents(f):
878 pl = f.parents()
878 pl = f.parents()
879
879
880 # Don't return renamed parents if we aren't following.
880 # Don't return renamed parents if we aren't following.
881 if not follow:
881 if not follow:
882 pl = [p for p in pl if p.path() == f.path()]
882 pl = [p for p in pl if p.path() == f.path()]
883
883
884 # renamed filectx won't have a filelog yet, so set it
884 # renamed filectx won't have a filelog yet, so set it
885 # from the cache to save time
885 # from the cache to save time
886 for p in pl:
886 for p in pl:
887 if not '_filelog' in p.__dict__:
887 if not '_filelog' in p.__dict__:
888 p._filelog = getlog(p.path())
888 p._filelog = getlog(p.path())
889
889
890 return pl
890 return pl
891
891
892 # use linkrev to find the first changeset where self appeared
892 # use linkrev to find the first changeset where self appeared
893 base = self
893 base = self
894 introrev = self.introrev()
894 introrev = self.introrev()
895 if self.rev() != introrev:
895 if self.rev() != introrev:
896 base = self.filectx(self.filenode(), changeid=introrev)
896 base = self.filectx(self.filenode(), changeid=introrev)
897
897
898 # This algorithm would prefer to be recursive, but Python is a
898 # This algorithm would prefer to be recursive, but Python is a
899 # bit recursion-hostile. Instead we do an iterative
899 # bit recursion-hostile. Instead we do an iterative
900 # depth-first search.
900 # depth-first search.
901
901
902 visit = [base]
902 visit = [base]
903 hist = {}
903 hist = {}
904 pcache = {}
904 pcache = {}
905 needed = {base: 1}
905 needed = {base: 1}
906 while visit:
906 while visit:
907 f = visit[-1]
907 f = visit[-1]
908 pcached = f in pcache
908 pcached = f in pcache
909 if not pcached:
909 if not pcached:
910 pcache[f] = parents(f)
910 pcache[f] = parents(f)
911
911
912 ready = True
912 ready = True
913 pl = pcache[f]
913 pl = pcache[f]
914 for p in pl:
914 for p in pl:
915 if p not in hist:
915 if p not in hist:
916 ready = False
916 ready = False
917 visit.append(p)
917 visit.append(p)
918 if not pcached:
918 if not pcached:
919 needed[p] = needed.get(p, 0) + 1
919 needed[p] = needed.get(p, 0) + 1
920 if ready:
920 if ready:
921 visit.pop()
921 visit.pop()
922 reusable = f in hist
922 reusable = f in hist
923 if reusable:
923 if reusable:
924 curr = hist[f]
924 curr = hist[f]
925 else:
925 else:
926 curr = decorate(f.data(), f)
926 curr = decorate(f.data(), f)
927 for p in pl:
927 for p in pl:
928 if not reusable:
928 if not reusable:
929 curr = pair(hist[p], curr)
929 curr = pair(hist[p], curr)
930 if needed[p] == 1:
930 if needed[p] == 1:
931 del hist[p]
931 del hist[p]
932 del needed[p]
932 del needed[p]
933 else:
933 else:
934 needed[p] -= 1
934 needed[p] -= 1
935
935
936 hist[f] = curr
936 hist[f] = curr
937 pcache[f] = []
937 pcache[f] = []
938
938
939 return zip(hist[base][0], hist[base][1].splitlines(True))
939 return zip(hist[base][0], hist[base][1].splitlines(True))
940
940
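The decorate()/pair() helpers above are the heart of annotate: decorate tags every line of a revision's text with that revision, and pair copies the tags of unchanged blocks down from the parent. A minimal, hedged analogue using difflib from the standard library instead of mdiff.allblocks (and plain revision numbers instead of the (rev, flag) tuples used above); all names here are illustrative only:

    # Simplified stand-ins for decorate()/pair() above.
    import difflib

    def toy_decorate(text, rev):
        # blame every line of this text on `rev` for now
        return ([rev] * len(text.splitlines()), text)

    def toy_pair(parent, child):
        # lines unchanged since the parent inherit the parent's blame
        sm = difflib.SequenceMatcher(None, parent[1].splitlines(),
                                     child[1].splitlines())
        for tag, a1, a2, b1, b2 in sm.get_opcodes():
            if tag == 'equal':
                child[0][b1:b2] = parent[0][a1:a2]
        return child

    old = toy_decorate("apple\nbanana\n", 1)
    new = toy_decorate("apple\ncherry\nbanana\n", 2)
    annotated = toy_pair(old, new)
    print(list(zip(annotated[0], annotated[1].splitlines(True))))
    # [(1, 'apple\n'), (2, 'cherry\n'), (1, 'banana\n')]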
941 def ancestors(self, followfirst=False):
941 def ancestors(self, followfirst=False):
942 visit = {}
942 visit = {}
943 c = self
943 c = self
944 cut = followfirst and 1 or None
944 cut = followfirst and 1 or None
945 while True:
945 while True:
946 for parent in c.parents()[:cut]:
946 for parent in c.parents()[:cut]:
947 visit[(parent.rev(), parent.node())] = parent
947 visit[(parent.rev(), parent.node())] = parent
948 if not visit:
948 if not visit:
949 break
949 break
950 c = visit.pop(max(visit))
950 c = visit.pop(max(visit))
951 yield c
951 yield c
952
952
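The generator above keeps pending ancestors keyed by (rev, node) and always pops the maximum, so ancestors come out in descending revision order regardless of which parent chain they sit on. A standalone sketch with a plain rev -> parents mapping (toy_ancestors and parentmap are made-up names):

    # Toy version of ancestors() above: revisions are plain integers and
    # the dict keyed on them makes max(visit) pick the highest pending rev.
    def toy_ancestors(start, parentmap, followfirst=False):
        visit = {}
        cut = 1 if followfirst else None
        c = start
        while True:
            for parent in parentmap[c][:cut]:
                visit[parent] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

    parentmap = {5: [4, 2], 4: [3], 3: [1], 2: [1], 1: [0], 0: []}
    print(list(toy_ancestors(5, parentmap)))        # [4, 3, 2, 1, 0]
    print(list(toy_ancestors(5, parentmap, True)))  # [4, 3, 1, 0]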
953 class filectx(basefilectx):
953 class filectx(basefilectx):
954 """A filecontext object makes access to data related to a particular
954 """A filecontext object makes access to data related to a particular
955 filerevision convenient."""
955 filerevision convenient."""
956 def __init__(self, repo, path, changeid=None, fileid=None,
956 def __init__(self, repo, path, changeid=None, fileid=None,
957 filelog=None, changectx=None):
957 filelog=None, changectx=None):
958 """changeid can be a changeset revision, node, or tag.
958 """changeid can be a changeset revision, node, or tag.
959 fileid can be a file revision or node."""
959 fileid can be a file revision or node."""
960 self._repo = repo
960 self._repo = repo
961 self._path = path
961 self._path = path
962
962
963 assert (changeid is not None
963 assert (changeid is not None
964 or fileid is not None
964 or fileid is not None
965 or changectx is not None), \
965 or changectx is not None), \
966 ("bad args: changeid=%r, fileid=%r, changectx=%r"
966 ("bad args: changeid=%r, fileid=%r, changectx=%r"
967 % (changeid, fileid, changectx))
967 % (changeid, fileid, changectx))
968
968
969 if filelog is not None:
969 if filelog is not None:
970 self._filelog = filelog
970 self._filelog = filelog
971
971
972 if changeid is not None:
972 if changeid is not None:
973 self._changeid = changeid
973 self._changeid = changeid
974 if changectx is not None:
974 if changectx is not None:
975 self._changectx = changectx
975 self._changectx = changectx
976 if fileid is not None:
976 if fileid is not None:
977 self._fileid = fileid
977 self._fileid = fileid
978
978
979 @propertycache
979 @propertycache
980 def _changectx(self):
980 def _changectx(self):
981 try:
981 try:
982 return changectx(self._repo, self._changeid)
982 return changectx(self._repo, self._changeid)
983 except error.FilteredRepoLookupError:
983 except error.FilteredRepoLookupError:
984 # Linkrev may point to any revision in the repository. When the
984 # Linkrev may point to any revision in the repository. When the
985 # repository is filtered this may lead to `filectx` trying to build
985 # repository is filtered this may lead to `filectx` trying to build
986 # `changectx` for a filtered revision. In such a case we fall back to
986 # `changectx` for a filtered revision. In such a case we fall back to
987 # creating a `changectx` on the unfiltered version of the repository.
987 # creating a `changectx` on the unfiltered version of the repository.
988 # This fallback should not be an issue because `changectx` from
988 # This fallback should not be an issue because `changectx` from
989 # `filectx` are not used in complex operations that care about
989 # `filectx` are not used in complex operations that care about
990 # filtering.
990 # filtering.
991 #
991 #
992 # This fallback is a cheap and dirty fix that prevents several
992 # This fallback is a cheap and dirty fix that prevents several
993 # crashes. It does not ensure the behavior is correct. However the
993 # crashes. It does not ensure the behavior is correct. However the
994 # behavior was not correct before filtering either, and "incorrect
994 # behavior was not correct before filtering either, and "incorrect
995 # behavior" is seen as better than a "crash".
995 # behavior" is seen as better than a "crash".
996 #
996 #
997 # Linkrevs have several serious problems with filtering that are
997 # Linkrevs have several serious problems with filtering that are
998 # complicated to solve. Proper handling of the issue here should be
998 # complicated to solve. Proper handling of the issue here should be
999 # considered when solving the linkrev issues is on the table.
999 # considered when solving the linkrev issues is on the table.
1000 return changectx(self._repo.unfiltered(), self._changeid)
1000 return changectx(self._repo.unfiltered(), self._changeid)
1001
1001
1002 def filectx(self, fileid, changeid=None):
1002 def filectx(self, fileid, changeid=None):
1003 '''opens an arbitrary revision of the file without
1003 '''opens an arbitrary revision of the file without
1004 opening a new filelog'''
1004 opening a new filelog'''
1005 return filectx(self._repo, self._path, fileid=fileid,
1005 return filectx(self._repo, self._path, fileid=fileid,
1006 filelog=self._filelog, changeid=changeid)
1006 filelog=self._filelog, changeid=changeid)
1007
1007
1008 def data(self):
1008 def data(self):
1009 try:
1009 try:
1010 return self._filelog.read(self._filenode)
1010 return self._filelog.read(self._filenode)
1011 except error.CensoredNodeError:
1011 except error.CensoredNodeError:
1012 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1012 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1013 return ""
1013 return ""
1014 raise util.Abort(_("censored node: %s") % short(self._filenode),
1014 raise util.Abort(_("censored node: %s") % short(self._filenode),
1015 hint=_("set censor.policy to ignore errors"))
1015 hint=_("set censor.policy to ignore errors"))
1016
1016
1017 def size(self):
1017 def size(self):
1018 return self._filelog.size(self._filerev)
1018 return self._filelog.size(self._filerev)
1019
1019
1020 def renamed(self):
1020 def renamed(self):
1021 """check if file was actually renamed in this changeset revision
1021 """check if file was actually renamed in this changeset revision
1022
1022
1023 If a rename is logged in the file revision, we report the copy for the changeset only
1023 If a rename is logged in the file revision, we report the copy for the changeset only
1024 if the file revision's linkrev points back to the changeset in question
1024 if the file revision's linkrev points back to the changeset in question
1025 or both changeset parents contain different file revisions.
1025 or both changeset parents contain different file revisions.
1026 """
1026 """
1027
1027
1028 renamed = self._filelog.renamed(self._filenode)
1028 renamed = self._filelog.renamed(self._filenode)
1029 if not renamed:
1029 if not renamed:
1030 return renamed
1030 return renamed
1031
1031
1032 if self.rev() == self.linkrev():
1032 if self.rev() == self.linkrev():
1033 return renamed
1033 return renamed
1034
1034
1035 name = self.path()
1035 name = self.path()
1036 fnode = self._filenode
1036 fnode = self._filenode
1037 for p in self._changectx.parents():
1037 for p in self._changectx.parents():
1038 try:
1038 try:
1039 if fnode == p.filenode(name):
1039 if fnode == p.filenode(name):
1040 return None
1040 return None
1041 except error.LookupError:
1041 except error.LookupError:
1042 pass
1042 pass
1043 return renamed
1043 return renamed
1044
1044
1045 def children(self):
1045 def children(self):
1046 # hard for renames
1046 # hard for renames
1047 c = self._filelog.children(self._filenode)
1047 c = self._filelog.children(self._filenode)
1048 return [filectx(self._repo, self._path, fileid=x,
1048 return [filectx(self._repo, self._path, fileid=x,
1049 filelog=self._filelog) for x in c]
1049 filelog=self._filelog) for x in c]
1050
1050
1051 class committablectx(basectx):
1051 class committablectx(basectx):
1052 """A committablectx object provides common functionality for a context that
1052 """A committablectx object provides common functionality for a context that
1053 wants the ability to commit, e.g. workingctx or memctx."""
1053 wants the ability to commit, e.g. workingctx or memctx."""
1054 def __init__(self, repo, text="", user=None, date=None, extra=None,
1054 def __init__(self, repo, text="", user=None, date=None, extra=None,
1055 changes=None):
1055 changes=None):
1056 self._repo = repo
1056 self._repo = repo
1057 self._rev = None
1057 self._rev = None
1058 self._node = None
1058 self._node = None
1059 self._text = text
1059 self._text = text
1060 if date:
1060 if date:
1061 self._date = util.parsedate(date)
1061 self._date = util.parsedate(date)
1062 if user:
1062 if user:
1063 self._user = user
1063 self._user = user
1064 if changes:
1064 if changes:
1065 self._status = changes
1065 self._status = changes
1066
1066
1067 self._extra = {}
1067 self._extra = {}
1068 if extra:
1068 if extra:
1069 self._extra = extra.copy()
1069 self._extra = extra.copy()
1070 if 'branch' not in self._extra:
1070 if 'branch' not in self._extra:
1071 try:
1071 try:
1072 branch = encoding.fromlocal(self._repo.dirstate.branch())
1072 branch = encoding.fromlocal(self._repo.dirstate.branch())
1073 except UnicodeDecodeError:
1073 except UnicodeDecodeError:
1074 raise util.Abort(_('branch name not in UTF-8!'))
1074 raise util.Abort(_('branch name not in UTF-8!'))
1075 self._extra['branch'] = branch
1075 self._extra['branch'] = branch
1076 if self._extra['branch'] == '':
1076 if self._extra['branch'] == '':
1077 self._extra['branch'] = 'default'
1077 self._extra['branch'] = 'default'
1078
1078
1079 def __str__(self):
1079 def __str__(self):
1080 return str(self._parents[0]) + "+"
1080 return str(self._parents[0]) + "+"
1081
1081
1082 def __nonzero__(self):
1082 def __nonzero__(self):
1083 return True
1083 return True
1084
1084
1085 def _buildflagfunc(self):
1085 def _buildflagfunc(self):
1086 # Create a fallback function for getting file flags when the
1086 # Create a fallback function for getting file flags when the
1087 # filesystem doesn't support them
1087 # filesystem doesn't support them
1088
1088
1089 copiesget = self._repo.dirstate.copies().get
1089 copiesget = self._repo.dirstate.copies().get
1090
1090
1091 if len(self._parents) < 2:
1091 if len(self._parents) < 2:
1092 # when we have one parent, it's easy: copy from parent
1092 # when we have one parent, it's easy: copy from parent
1093 man = self._parents[0].manifest()
1093 man = self._parents[0].manifest()
1094 def func(f):
1094 def func(f):
1095 f = copiesget(f, f)
1095 f = copiesget(f, f)
1096 return man.flags(f)
1096 return man.flags(f)
1097 else:
1097 else:
1098 # merges are tricky: we try to reconstruct the unstored
1098 # merges are tricky: we try to reconstruct the unstored
1099 # result from the merge (issue1802)
1099 # result from the merge (issue1802)
1100 p1, p2 = self._parents
1100 p1, p2 = self._parents
1101 pa = p1.ancestor(p2)
1101 pa = p1.ancestor(p2)
1102 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1102 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1103
1103
1104 def func(f):
1104 def func(f):
1105 f = copiesget(f, f) # may be wrong for merges with copies
1105 f = copiesget(f, f) # may be wrong for merges with copies
1106 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1106 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1107 if fl1 == fl2:
1107 if fl1 == fl2:
1108 return fl1
1108 return fl1
1109 if fl1 == fla:
1109 if fl1 == fla:
1110 return fl2
1110 return fl2
1111 if fl2 == fla:
1111 if fl2 == fla:
1112 return fl1
1112 return fl1
1113 return '' # punt for conflicts
1113 return '' # punt for conflicts
1114
1114
1115 return func
1115 return func
1116
1116
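The merge branch above reconciles a file's flags from both parents and their common ancestor: if only one side changed the flag relative to the ancestor, that side wins, otherwise it punts with an empty flag. The same rule as a tiny standalone function (mergeflags is a made-up name):

    # Toy restatement of the flag reconciliation in the merge case above.
    def mergeflags(fl1, fl2, fla):
        if fl1 == fl2:
            return fl1      # both sides agree
        if fl1 == fla:
            return fl2      # only the second parent changed it
        if fl2 == fla:
            return fl1      # only the first parent changed it
        return ''           # conflicting changes: punt

    print(mergeflags('x', 'x', ''))   # 'x'  (both made it executable)
    print(mergeflags('', 'l', ''))    # 'l'  (only p2 turned it into a symlink)
    print(mergeflags('x', 'l', ''))   # ''   (conflict)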
1117 @propertycache
1117 @propertycache
1118 def _flagfunc(self):
1118 def _flagfunc(self):
1119 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1119 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1120
1120
1121 @propertycache
1121 @propertycache
1122 def _manifest(self):
1122 def _manifest(self):
1123 """generate a manifest corresponding to the values in self._status
1123 """generate a manifest corresponding to the values in self._status
1124
1124
1125 This reuses the file nodeids from the parent, but appends an extra letter
1125 This reuses the file nodeids from the parent, but appends an extra letter
1126 to changed files. Modified files get an extra 'm' while added files get
1126 to changed files. Modified files get an extra 'm' while added files get
1127 an extra 'a'. This is used by the manifest merge code to see that files
1127 an extra 'a'. This is used by the manifest merge code to see that files
1128 are different and by update logic to avoid deleting newly added files.
1128 are different and by update logic to avoid deleting newly added files.
1129 """
1129 """
1130
1130
1131 man1 = self._parents[0].manifest()
1131 man1 = self._parents[0].manifest()
1132 man = man1.copy()
1132 man = man1.copy()
1133 if len(self._parents) > 1:
1133 if len(self._parents) > 1:
1134 man2 = self.p2().manifest()
1134 man2 = self.p2().manifest()
1135 def getman(f):
1135 def getman(f):
1136 if f in man1:
1136 if f in man1:
1137 return man1
1137 return man1
1138 return man2
1138 return man2
1139 else:
1139 else:
1140 getman = lambda f: man1
1140 getman = lambda f: man1
1141
1141
1142 copied = self._repo.dirstate.copies()
1142 copied = self._repo.dirstate.copies()
1143 ff = self._flagfunc
1143 ff = self._flagfunc
1144 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1144 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1145 for f in l:
1145 for f in l:
1146 orig = copied.get(f, f)
1146 orig = copied.get(f, f)
1147 man[f] = getman(orig).get(orig, nullid) + i
1147 man[f] = getman(orig).get(orig, nullid) + i
1148 try:
1148 try:
1149 man.setflag(f, ff(f))
1149 man.setflag(f, ff(f))
1150 except OSError:
1150 except OSError:
1151 pass
1151 pass
1152
1152
1153 for f in self._status.deleted + self._status.removed:
1153 for f in self._status.deleted + self._status.removed:
1154 if f in man:
1154 if f in man:
1155 del man[f]
1155 del man[f]
1156
1156
1157 return man
1157 return man
1158
1158
1159 @propertycache
1159 @propertycache
1160 def _status(self):
1160 def _status(self):
1161 return self._repo.status()
1161 return self._repo.status()
1162
1162
1163 @propertycache
1163 @propertycache
1164 def _user(self):
1164 def _user(self):
1165 return self._repo.ui.username()
1165 return self._repo.ui.username()
1166
1166
1167 @propertycache
1167 @propertycache
1168 def _date(self):
1168 def _date(self):
1169 return util.makedate()
1169 return util.makedate()
1170
1170
1171 def subrev(self, subpath):
1171 def subrev(self, subpath):
1172 return None
1172 return None
1173
1173
1174 def user(self):
1174 def user(self):
1175 return self._user or self._repo.ui.username()
1175 return self._user or self._repo.ui.username()
1176 def date(self):
1176 def date(self):
1177 return self._date
1177 return self._date
1178 def description(self):
1178 def description(self):
1179 return self._text
1179 return self._text
1180 def files(self):
1180 def files(self):
1181 return sorted(self._status.modified + self._status.added +
1181 return sorted(self._status.modified + self._status.added +
1182 self._status.removed)
1182 self._status.removed)
1183
1183
1184 def modified(self):
1184 def modified(self):
1185 return self._status.modified
1185 return self._status.modified
1186 def added(self):
1186 def added(self):
1187 return self._status.added
1187 return self._status.added
1188 def removed(self):
1188 def removed(self):
1189 return self._status.removed
1189 return self._status.removed
1190 def deleted(self):
1190 def deleted(self):
1191 return self._status.deleted
1191 return self._status.deleted
1192 def branch(self):
1192 def branch(self):
1193 return encoding.tolocal(self._extra['branch'])
1193 return encoding.tolocal(self._extra['branch'])
1194 def closesbranch(self):
1194 def closesbranch(self):
1195 return 'close' in self._extra
1195 return 'close' in self._extra
1196 def extra(self):
1196 def extra(self):
1197 return self._extra
1197 return self._extra
1198
1198
1199 def tags(self):
1199 def tags(self):
1200 t = []
1200 t = []
1201 for p in self.parents():
1201 for p in self.parents():
1202 t.extend(p.tags())
1202 t.extend(p.tags())
1203 return t
1203 return t
1204
1204
1205 def bookmarks(self):
1205 def bookmarks(self):
1206 b = []
1206 b = []
1207 for p in self.parents():
1207 for p in self.parents():
1208 b.extend(p.bookmarks())
1208 b.extend(p.bookmarks())
1209 return b
1209 return b
1210
1210
1211 def phase(self):
1211 def phase(self):
1212 phase = phases.draft # default phase to draft
1212 phase = phases.draft # default phase to draft
1213 for p in self.parents():
1213 for p in self.parents():
1214 phase = max(phase, p.phase())
1214 phase = max(phase, p.phase())
1215 return phase
1215 return phase
1216
1216
1217 def hidden(self):
1217 def hidden(self):
1218 return False
1218 return False
1219
1219
1220 def children(self):
1220 def children(self):
1221 return []
1221 return []
1222
1222
1223 def flags(self, path):
1223 def flags(self, path):
1224 if '_manifest' in self.__dict__:
1224 if '_manifest' in self.__dict__:
1225 try:
1225 try:
1226 return self._manifest.flags(path)
1226 return self._manifest.flags(path)
1227 except KeyError:
1227 except KeyError:
1228 return ''
1228 return ''
1229
1229
1230 try:
1230 try:
1231 return self._flagfunc(path)
1231 return self._flagfunc(path)
1232 except OSError:
1232 except OSError:
1233 return ''
1233 return ''
1234
1234
1235 def ancestor(self, c2):
1235 def ancestor(self, c2):
1236 """return the "best" ancestor context of self and c2"""
1236 """return the "best" ancestor context of self and c2"""
1237 return self._parents[0].ancestor(c2) # punt on two parents for now
1237 return self._parents[0].ancestor(c2) # punt on two parents for now
1238
1238
1239 def walk(self, match):
1239 def walk(self, match):
1240 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1240 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1241 True, False))
1241 True, False))
1242
1242
1243 def matches(self, match):
1243 def matches(self, match):
1244 return sorted(self._repo.dirstate.matches(match))
1244 return sorted(self._repo.dirstate.matches(match))
1245
1245
1246 def ancestors(self):
1246 def ancestors(self):
1247 for p in self._parents:
1247 for p in self._parents:
1248 yield p
1248 yield p
1249 for a in self._repo.changelog.ancestors(
1249 for a in self._repo.changelog.ancestors(
1250 [p.rev() for p in self._parents]):
1250 [p.rev() for p in self._parents]):
1251 yield changectx(self._repo, a)
1251 yield changectx(self._repo, a)
1252
1252
1253 def markcommitted(self, node):
1253 def markcommitted(self, node):
1254 """Perform post-commit cleanup necessary after committing this ctx
1254 """Perform post-commit cleanup necessary after committing this ctx
1255
1255
1256 Specifically, this updates the backing stores that this working context
1256 Specifically, this updates the backing stores that this working context
1257 wraps so that they record that the changes represented by this
1257 wraps so that they record that the changes represented by this
1258 workingctx have been committed. For example, it marks
1258 workingctx have been committed. For example, it marks
1259 modified and added files as normal in the dirstate.
1259 modified and added files as normal in the dirstate.
1260
1260
1261 """
1261 """
1262
1262
1263 self._repo.dirstate.beginparentchange()
1263 self._repo.dirstate.beginparentchange()
1264 for f in self.modified() + self.added():
1264 for f in self.modified() + self.added():
1265 self._repo.dirstate.normal(f)
1265 self._repo.dirstate.normal(f)
1266 for f in self.removed():
1266 for f in self.removed():
1267 self._repo.dirstate.drop(f)
1267 self._repo.dirstate.drop(f)
1268 self._repo.dirstate.setparents(node)
1268 self._repo.dirstate.setparents(node)
1269 self._repo.dirstate.endparentchange()
1269 self._repo.dirstate.endparentchange()
1270
1270
1271 def dirs(self):
1271 def dirs(self):
1272 return self._repo.dirstate.dirs()
1272 return self._repo.dirstate.dirs()
1273
1273
1274 class workingctx(committablectx):
1274 class workingctx(committablectx):
1275 """A workingctx object makes access to data related to
1275 """A workingctx object makes access to data related to
1276 the current working directory convenient.
1276 the current working directory convenient.
1277 date - any valid date string or (unixtime, offset), or None.
1277 date - any valid date string or (unixtime, offset), or None.
1278 user - username string, or None.
1278 user - username string, or None.
1279 extra - a dictionary of extra values, or None.
1279 extra - a dictionary of extra values, or None.
1280 changes - a list of file lists as returned by localrepo.status()
1280 changes - a list of file lists as returned by localrepo.status()
1281 or None to use the repository status.
1281 or None to use the repository status.
1282 """
1282 """
1283 def __init__(self, repo, text="", user=None, date=None, extra=None,
1283 def __init__(self, repo, text="", user=None, date=None, extra=None,
1284 changes=None):
1284 changes=None):
1285 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1285 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1286
1286
1287 def __iter__(self):
1287 def __iter__(self):
1288 d = self._repo.dirstate
1288 d = self._repo.dirstate
1289 for f in d:
1289 for f in d:
1290 if d[f] != 'r':
1290 if d[f] != 'r':
1291 yield f
1291 yield f
1292
1292
1293 def __contains__(self, key):
1293 def __contains__(self, key):
1294 return self._repo.dirstate[key] not in "?r"
1294 return self._repo.dirstate[key] not in "?r"
1295
1295
1296 @propertycache
1296 @propertycache
1297 def _parents(self):
1297 def _parents(self):
1298 p = self._repo.dirstate.parents()
1298 p = self._repo.dirstate.parents()
1299 if p[1] == nullid:
1299 if p[1] == nullid:
1300 p = p[:-1]
1300 p = p[:-1]
1301 return [changectx(self._repo, x) for x in p]
1301 return [changectx(self._repo, x) for x in p]
1302
1302
1303 def filectx(self, path, filelog=None):
1303 def filectx(self, path, filelog=None):
1304 """get a file context from the working directory"""
1304 """get a file context from the working directory"""
1305 return workingfilectx(self._repo, path, workingctx=self,
1305 return workingfilectx(self._repo, path, workingctx=self,
1306 filelog=filelog)
1306 filelog=filelog)
1307
1307
1308 def dirty(self, missing=False, merge=True, branch=True):
1308 def dirty(self, missing=False, merge=True, branch=True):
1309 "check whether a working directory is modified"
1309 "check whether a working directory is modified"
1310 # check subrepos first
1310 # check subrepos first
1311 for s in sorted(self.substate):
1311 for s in sorted(self.substate):
1312 if self.sub(s).dirty():
1312 if self.sub(s).dirty():
1313 return True
1313 return True
1314 # check current working dir
1314 # check current working dir
1315 return ((merge and self.p2()) or
1315 return ((merge and self.p2()) or
1316 (branch and self.branch() != self.p1().branch()) or
1316 (branch and self.branch() != self.p1().branch()) or
1317 self.modified() or self.added() or self.removed() or
1317 self.modified() or self.added() or self.removed() or
1318 (missing and self.deleted()))
1318 (missing and self.deleted()))
1319
1319
1320 def add(self, list, prefix=""):
1320 def add(self, list, prefix=""):
1321 join = lambda f: os.path.join(prefix, f)
1321 join = lambda f: os.path.join(prefix, f)
1322 wlock = self._repo.wlock()
1322 wlock = self._repo.wlock()
1323 ui, ds = self._repo.ui, self._repo.dirstate
1323 ui, ds = self._repo.ui, self._repo.dirstate
1324 try:
1324 try:
1325 rejected = []
1325 rejected = []
1326 lstat = self._repo.wvfs.lstat
1326 lstat = self._repo.wvfs.lstat
1327 for f in list:
1327 for f in list:
1328 scmutil.checkportable(ui, join(f))
1328 scmutil.checkportable(ui, join(f))
1329 try:
1329 try:
1330 st = lstat(f)
1330 st = lstat(f)
1331 except OSError:
1331 except OSError:
1332 ui.warn(_("%s does not exist!\n") % join(f))
1332 ui.warn(_("%s does not exist!\n") % join(f))
1333 rejected.append(f)
1333 rejected.append(f)
1334 continue
1334 continue
1335 if st.st_size > 10000000:
1335 if st.st_size > 10000000:
1336 ui.warn(_("%s: up to %d MB of RAM may be required "
1336 ui.warn(_("%s: up to %d MB of RAM may be required "
1337 "to manage this file\n"
1337 "to manage this file\n"
1338 "(use 'hg revert %s' to cancel the "
1338 "(use 'hg revert %s' to cancel the "
1339 "pending addition)\n")
1339 "pending addition)\n")
1340 % (f, 3 * st.st_size // 1000000, join(f)))
1340 % (f, 3 * st.st_size // 1000000, join(f)))
1341 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1341 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1342 ui.warn(_("%s not added: only files and symlinks "
1342 ui.warn(_("%s not added: only files and symlinks "
1343 "supported currently\n") % join(f))
1343 "supported currently\n") % join(f))
1344 rejected.append(f)
1344 rejected.append(f)
1345 elif ds[f] in 'amn':
1345 elif ds[f] in 'amn':
1346 ui.warn(_("%s already tracked!\n") % join(f))
1346 ui.warn(_("%s already tracked!\n") % join(f))
1347 elif ds[f] == 'r':
1347 elif ds[f] == 'r':
1348 ds.normallookup(f)
1348 ds.normallookup(f)
1349 else:
1349 else:
1350 ds.add(f)
1350 ds.add(f)
1351 return rejected
1351 return rejected
1352 finally:
1352 finally:
1353 wlock.release()
1353 wlock.release()
1354
1354
1355 def forget(self, files, prefix=""):
1355 def forget(self, files, prefix=""):
1356 join = lambda f: os.path.join(prefix, f)
1356 join = lambda f: os.path.join(prefix, f)
1357 wlock = self._repo.wlock()
1357 wlock = self._repo.wlock()
1358 try:
1358 try:
1359 rejected = []
1359 rejected = []
1360 for f in files:
1360 for f in files:
1361 if f not in self._repo.dirstate:
1361 if f not in self._repo.dirstate:
1362 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1362 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1363 rejected.append(f)
1363 rejected.append(f)
1364 elif self._repo.dirstate[f] != 'a':
1364 elif self._repo.dirstate[f] != 'a':
1365 self._repo.dirstate.remove(f)
1365 self._repo.dirstate.remove(f)
1366 else:
1366 else:
1367 self._repo.dirstate.drop(f)
1367 self._repo.dirstate.drop(f)
1368 return rejected
1368 return rejected
1369 finally:
1369 finally:
1370 wlock.release()
1370 wlock.release()
1371
1371
1372 def undelete(self, list):
1372 def undelete(self, list):
1373 pctxs = self.parents()
1373 pctxs = self.parents()
1374 wlock = self._repo.wlock()
1374 wlock = self._repo.wlock()
1375 try:
1375 try:
1376 for f in list:
1376 for f in list:
1377 if self._repo.dirstate[f] != 'r':
1377 if self._repo.dirstate[f] != 'r':
1378 self._repo.ui.warn(_("%s not removed!\n") % f)
1378 self._repo.ui.warn(_("%s not removed!\n") % f)
1379 else:
1379 else:
1380 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1380 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1381 t = fctx.data()
1381 t = fctx.data()
1382 self._repo.wwrite(f, t, fctx.flags())
1382 self._repo.wwrite(f, t, fctx.flags())
1383 self._repo.dirstate.normal(f)
1383 self._repo.dirstate.normal(f)
1384 finally:
1384 finally:
1385 wlock.release()
1385 wlock.release()
1386
1386
1387 def copy(self, source, dest):
1387 def copy(self, source, dest):
1388 try:
1388 try:
1389 st = self._repo.wvfs.lstat(dest)
1389 st = self._repo.wvfs.lstat(dest)
1390 except OSError, err:
1390 except OSError, err:
1391 if err.errno != errno.ENOENT:
1391 if err.errno != errno.ENOENT:
1392 raise
1392 raise
1393 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1393 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1394 return
1394 return
1395 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1395 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1396 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1396 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1397 "symbolic link\n") % dest)
1397 "symbolic link\n") % dest)
1398 else:
1398 else:
1399 wlock = self._repo.wlock()
1399 wlock = self._repo.wlock()
1400 try:
1400 try:
1401 if self._repo.dirstate[dest] in '?':
1401 if self._repo.dirstate[dest] in '?':
1402 self._repo.dirstate.add(dest)
1402 self._repo.dirstate.add(dest)
1403 elif self._repo.dirstate[dest] in 'r':
1403 elif self._repo.dirstate[dest] in 'r':
1404 self._repo.dirstate.normallookup(dest)
1404 self._repo.dirstate.normallookup(dest)
1405 self._repo.dirstate.copy(source, dest)
1405 self._repo.dirstate.copy(source, dest)
1406 finally:
1406 finally:
1407 wlock.release()
1407 wlock.release()
1408
1408
1409 def _filtersuspectsymlink(self, files):
1409 def _filtersuspectsymlink(self, files):
1410 if not files or self._repo.dirstate._checklink:
1410 if not files or self._repo.dirstate._checklink:
1411 return files
1411 return files
1412
1412
1413 # Symlink placeholders may get non-symlink-like contents
1413 # Symlink placeholders may get non-symlink-like contents
1414 # via user error or dereferencing by NFS or Samba servers,
1414 # via user error or dereferencing by NFS or Samba servers,
1415 # so we filter out any placeholders that don't look like a
1415 # so we filter out any placeholders that don't look like a
1416 # symlink
1416 # symlink
1417 sane = []
1417 sane = []
1418 for f in files:
1418 for f in files:
1419 if self.flags(f) == 'l':
1419 if self.flags(f) == 'l':
1420 d = self[f].data()
1420 d = self[f].data()
1421 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1421 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1422 self._repo.ui.debug('ignoring suspect symlink placeholder'
1422 self._repo.ui.debug('ignoring suspect symlink placeholder'
1423 ' "%s"\n' % f)
1423 ' "%s"\n' % f)
1424 continue
1424 continue
1425 sane.append(f)
1425 sane.append(f)
1426 return sane
1426 return sane
1427
1427
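The filter above treats a tracked symlink as a broken placeholder when its recorded "target" is empty, 1024 bytes or longer, spans several lines, or looks binary. A rough standalone restatement of that heuristic (the function name is made up, and the NUL check is only a crude stand-in for util.binary):

    # Toy version of the placeholder check above: a sane symlink "target"
    # is a short, single-line, non-binary string.
    def looks_like_symlink_target(data):
        return (data != ''
                and len(data) < 1024
                and '\n' not in data
                and '\0' not in data)

    print(looks_like_symlink_target('../lib/libfoo.so'))         # True
    print(looks_like_symlink_target('#!/bin/sh\necho hello\n'))  # False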
1428 def _checklookup(self, files):
1428 def _checklookup(self, files):
1429 # check for any possibly clean files
1429 # check for any possibly clean files
1430 if not files:
1430 if not files:
1431 return [], []
1431 return [], []
1432
1432
1433 modified = []
1433 modified = []
1434 fixup = []
1434 fixup = []
1435 pctx = self._parents[0]
1435 pctx = self._parents[0]
1436 # do a full compare of any files that might have changed
1436 # do a full compare of any files that might have changed
1437 for f in sorted(files):
1437 for f in sorted(files):
1438 if (f not in pctx or self.flags(f) != pctx.flags(f)
1438 if (f not in pctx or self.flags(f) != pctx.flags(f)
1439 or pctx[f].cmp(self[f])):
1439 or pctx[f].cmp(self[f])):
1440 modified.append(f)
1440 modified.append(f)
1441 else:
1441 else:
1442 fixup.append(f)
1442 fixup.append(f)
1443
1443
1444 # update dirstate for files that are actually clean
1444 # update dirstate for files that are actually clean
1445 if fixup:
1445 if fixup:
1446 try:
1446 try:
1447 # updating the dirstate is optional
1447 # updating the dirstate is optional
1448 # so we don't wait on the lock
1448 # so we don't wait on the lock
1449 # wlock can invalidate the dirstate, so cache normal _after_
1449 # wlock can invalidate the dirstate, so cache normal _after_
1450 # taking the lock
1450 # taking the lock
1451 wlock = self._repo.wlock(False)
1451 wlock = self._repo.wlock(False)
1452 normal = self._repo.dirstate.normal
1452 normal = self._repo.dirstate.normal
1453 try:
1453 try:
1454 for f in fixup:
1454 for f in fixup:
1455 normal(f)
1455 normal(f)
1456 finally:
1456 finally:
1457 wlock.release()
1457 wlock.release()
1458 except error.LockError:
1458 except error.LockError:
1459 pass
1459 pass
1460 return modified, fixup
1460 return modified, fixup
1461
1461
1462 def _manifestmatches(self, match, s):
1462 def _manifestmatches(self, match, s):
1463 """Slow path for workingctx
1463 """Slow path for workingctx
1464
1464
1465 The fast path is when we compare the working directory to its parent,
1465 The fast path is when we compare the working directory to its parent,
1466 so this function is only used when comparing with a non-parent; therefore we
1466 so this function is only used when comparing with a non-parent; therefore we
1467 need to build a manifest and return what matches.
1467 need to build a manifest and return what matches.
1468 """
1468 """
1469 mf = self._repo['.']._manifestmatches(match, s)
1469 mf = self._repo['.']._manifestmatches(match, s)
1470 for f in s.modified + s.added:
1470 for f in s.modified + s.added:
1471 mf[f] = _newnode
1471 mf[f] = _newnode
1472 mf.setflag(f, self.flags(f))
1472 mf.setflag(f, self.flags(f))
1473 for f in s.removed:
1473 for f in s.removed:
1474 if f in mf:
1474 if f in mf:
1475 del mf[f]
1475 del mf[f]
1476 return mf
1476 return mf
1477
1477
1478 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1478 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1479 unknown=False):
1479 unknown=False):
1480 '''Gets the status from the dirstate -- internal use only.'''
1480 '''Gets the status from the dirstate -- internal use only.'''
1481 listignored, listclean, listunknown = ignored, clean, unknown
1481 listignored, listclean, listunknown = ignored, clean, unknown
1482 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1482 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1483 subrepos = []
1483 subrepos = []
1484 if '.hgsub' in self:
1484 if '.hgsub' in self:
1485 subrepos = sorted(self.substate)
1485 subrepos = sorted(self.substate)
1486 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1486 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1487 listclean, listunknown)
1487 listclean, listunknown)
1488
1488
1489 # check for any possibly clean files
1489 # check for any possibly clean files
1490 if cmp:
1490 if cmp:
1491 modified2, fixup = self._checklookup(cmp)
1491 modified2, fixup = self._checklookup(cmp)
1492 s.modified.extend(modified2)
1492 s.modified.extend(modified2)
1493
1493
1494 # update dirstate for files that are actually clean
1494 # update dirstate for files that are actually clean
1495 if fixup and listclean:
1495 if fixup and listclean:
1496 s.clean.extend(fixup)
1496 s.clean.extend(fixup)
1497
1497
1498 if match.always():
1498 if match.always():
1499 # cache for performance
1499 # cache for performance
1500 if s.unknown or s.ignored or s.clean:
1500 if s.unknown or s.ignored or s.clean:
1501 # "_status" is cached with list*=False in the normal route
1501 # "_status" is cached with list*=False in the normal route
1502 self._status = scmutil.status(s.modified, s.added, s.removed,
1502 self._status = scmutil.status(s.modified, s.added, s.removed,
1503 s.deleted, [], [], [])
1503 s.deleted, [], [], [])
1504 else:
1504 else:
1505 self._status = s
1505 self._status = s
1506
1506
1507 return s
1507 return s
1508
1508
1509 def _buildstatus(self, other, s, match, listignored, listclean,
1509 def _buildstatus(self, other, s, match, listignored, listclean,
1510 listunknown):
1510 listunknown):
1511 """build a status with respect to another context
1511 """build a status with respect to another context
1512
1512
1513 This includes logic for maintaining the fast path of status when
1513 This includes logic for maintaining the fast path of status when
1514 comparing the working directory against its parent: a new manifest is
1514 comparing the working directory against its parent: a new manifest is
1515 only built if self (the working directory) is being compared against
1515 only built if self (the working directory) is being compared against
1516 something other than its parent (repo['.']).
1516 something other than its parent (repo['.']).
1517 """
1517 """
1518 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1518 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1519 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1519 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1520 # might have accidentally ended up with the entire contents of the file
1520 # might have accidentally ended up with the entire contents of the file
1521 # they are supposed to be linking to.
1521 # they are supposed to be linking to.
1522 s.modified[:] = self._filtersuspectsymlink(s.modified)
1522 s.modified[:] = self._filtersuspectsymlink(s.modified)
1523 if other != self._repo['.']:
1523 if other != self._repo['.']:
1524 s = super(workingctx, self)._buildstatus(other, s, match,
1524 s = super(workingctx, self)._buildstatus(other, s, match,
1525 listignored, listclean,
1525 listignored, listclean,
1526 listunknown)
1526 listunknown)
1527 return s
1527 return s
1528
1528
1529 def _matchstatus(self, other, match):
1529 def _matchstatus(self, other, match):
1530 """override the match method with a filter for directory patterns
1530 """override the match method with a filter for directory patterns
1531
1531
1532 We use inheritance to customize the match.bad method only in cases of
1532 We use inheritance to customize the match.bad method only in cases of
1533 workingctx since it belongs only to the working directory when
1533 workingctx since it belongs only to the working directory when
1534 comparing against the parent changeset.
1534 comparing against the parent changeset.
1535
1535
1536 If we aren't comparing against the working directory's parent, then we
1536 If we aren't comparing against the working directory's parent, then we
1537 just use the default match object sent to us.
1537 just use the default match object sent to us.
1538 """
1538 """
1539 superself = super(workingctx, self)
1539 superself = super(workingctx, self)
1540 match = superself._matchstatus(other, match)
1540 match = superself._matchstatus(other, match)
1541 if other != self._repo['.']:
1541 if other != self._repo['.']:
1542 def bad(f, msg):
1542 def bad(f, msg):
1543 # 'f' may be a directory pattern from 'match.files()',
1543 # 'f' may be a directory pattern from 'match.files()',
1544 # so 'f not in ctx1' is not enough
1544 # so 'f not in ctx1' is not enough
1545 if f not in other and f not in other.dirs():
1545 if f not in other and f not in other.dirs():
1546 self._repo.ui.warn('%s: %s\n' %
1546 self._repo.ui.warn('%s: %s\n' %
1547 (self._repo.dirstate.pathto(f), msg))
1547 (self._repo.dirstate.pathto(f), msg))
1548 match.bad = bad
1548 match.bad = bad
1549 return match
1549 return match
1550
1550
1551 class committablefilectx(basefilectx):
1551 class committablefilectx(basefilectx):
1552 """A committablefilectx provides common functionality for a file context
1552 """A committablefilectx provides common functionality for a file context
1553 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1553 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1554 def __init__(self, repo, path, filelog=None, ctx=None):
1554 def __init__(self, repo, path, filelog=None, ctx=None):
1555 self._repo = repo
1555 self._repo = repo
1556 self._path = path
1556 self._path = path
1557 self._changeid = None
1557 self._changeid = None
1558 self._filerev = self._filenode = None
1558 self._filerev = self._filenode = None
1559
1559
1560 if filelog is not None:
1560 if filelog is not None:
1561 self._filelog = filelog
1561 self._filelog = filelog
1562 if ctx:
1562 if ctx:
1563 self._changectx = ctx
1563 self._changectx = ctx
1564
1564
1565 def __nonzero__(self):
1565 def __nonzero__(self):
1566 return True
1566 return True
1567
1567
1568 def parents(self):
1568 def parents(self):
1569 '''return parent filectxs, following copies if necessary'''
1569 '''return parent filectxs, following copies if necessary'''
1570 def filenode(ctx, path):
1570 def filenode(ctx, path):
1571 return ctx._manifest.get(path, nullid)
1571 return ctx._manifest.get(path, nullid)
1572
1572
1573 path = self._path
1573 path = self._path
1574 fl = self._filelog
1574 fl = self._filelog
1575 pcl = self._changectx._parents
1575 pcl = self._changectx._parents
1576 renamed = self.renamed()
1576 renamed = self.renamed()
1577
1577
1578 if renamed:
1578 if renamed:
1579 pl = [renamed + (None,)]
1579 pl = [renamed + (None,)]
1580 else:
1580 else:
1581 pl = [(path, filenode(pcl[0], path), fl)]
1581 pl = [(path, filenode(pcl[0], path), fl)]
1582
1582
1583 for pc in pcl[1:]:
1583 for pc in pcl[1:]:
1584 pl.append((path, filenode(pc, path), fl))
1584 pl.append((path, filenode(pc, path), fl))
1585
1585
1586 return [filectx(self._repo, p, fileid=n, filelog=l)
1586 return [filectx(self._repo, p, fileid=n, filelog=l)
1587 for p, n, l in pl if n != nullid]
1587 for p, n, l in pl if n != nullid]
1588
1588
1589 def children(self):
1589 def children(self):
1590 return []
1590 return []
1591
1591
1592 class workingfilectx(committablefilectx):
1592 class workingfilectx(committablefilectx):
1593 """A workingfilectx object makes access to data related to a particular
1593 """A workingfilectx object makes access to data related to a particular
1594 file in the working directory convenient."""
1594 file in the working directory convenient."""
1595 def __init__(self, repo, path, filelog=None, workingctx=None):
1595 def __init__(self, repo, path, filelog=None, workingctx=None):
1596 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1596 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1597
1597
1598 @propertycache
1598 @propertycache
1599 def _changectx(self):
1599 def _changectx(self):
1600 return workingctx(self._repo)
1600 return workingctx(self._repo)
1601
1601
1602 def data(self):
1602 def data(self):
1603 return self._repo.wread(self._path)
1603 return self._repo.wread(self._path)
1604 def renamed(self):
1604 def renamed(self):
1605 rp = self._repo.dirstate.copied(self._path)
1605 rp = self._repo.dirstate.copied(self._path)
1606 if not rp:
1606 if not rp:
1607 return None
1607 return None
1608 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1608 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1609
1609
1610 def size(self):
1610 def size(self):
1611 return self._repo.wvfs.lstat(self._path).st_size
1611 return self._repo.wvfs.lstat(self._path).st_size
1612 def date(self):
1612 def date(self):
1613 t, tz = self._changectx.date()
1613 t, tz = self._changectx.date()
1614 try:
1614 try:
1615 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1615 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1616 except OSError, err:
1616 except OSError, err:
1617 if err.errno != errno.ENOENT:
1617 if err.errno != errno.ENOENT:
1618 raise
1618 raise
1619 return (t, tz)
1619 return (t, tz)
1620
1620
1621 def cmp(self, fctx):
1621 def cmp(self, fctx):
1622 """compare with other file context
1622 """compare with other file context
1623
1623
1624 returns True if different than fctx.
1624 returns True if different than fctx.
1625 """
1625 """
1626 # fctx should be a filectx (not a workingfilectx)
1626 # fctx should be a filectx (not a workingfilectx)
1627 # invert comparison to reuse the same code path
1627 # invert comparison to reuse the same code path
1628 return fctx.cmp(self)
1628 return fctx.cmp(self)
1629
1629
1630 def remove(self, ignoremissing=False):
1630 def remove(self, ignoremissing=False):
1631 """wraps unlink for a repo's working directory"""
1631 """wraps unlink for a repo's working directory"""
1632 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1632 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1633
1633
1634 def write(self, data, flags):
1634 def write(self, data, flags):
1635 """wraps repo.wwrite"""
1635 """wraps repo.wwrite"""
1636 self._repo.wwrite(self._path, data, flags)
1636 self._repo.wwrite(self._path, data, flags)
1637
1637
1638 class workingcommitctx(workingctx):
1638 class workingcommitctx(workingctx):
1639 """A workingcommitctx object makes access to data related to
1639 """A workingcommitctx object makes access to data related to
1640 the revision being committed convenient.
1640 the revision being committed convenient.
1641
1641
1642 This hides changes in the working directory if they aren't
1642 This hides changes in the working directory if they aren't
1643 committed in this context.
1643 committed in this context.
1644 """
1644 """
1645 def __init__(self, repo, changes,
1645 def __init__(self, repo, changes,
1646 text="", user=None, date=None, extra=None):
1646 text="", user=None, date=None, extra=None):
1647 super(workingctx, self).__init__(repo, text, user, date, extra,
1647 super(workingctx, self).__init__(repo, text, user, date, extra,
1648 changes)
1648 changes)
1649
1649
1650 def _buildstatus(self, other, s, match,
1651 listignored, listclean, listunknown):
1652 """Prevent ``workingctx._buildstatus`` from changing ``self._status``
1653 """
1654 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1655 if other != self._repo['.']:
1656 # workingctx._buildstatus doesn't change self._status in this case
1657 superself = super(workingcommitctx, self)
1658 s = superself._buildstatus(other, s, match,
1659 listignored, listclean, listunknown)
1660 return s
1661
1662 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1650 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1663 unknown=False):
1651 unknown=False):
1664 """Return matched files only in ``self._status``
1652 """Return matched files only in ``self._status``
1665
1653
1666 Uncommitted files appear "clean" via this context, even if
1654 Uncommitted files appear "clean" via this context, even if
1667 they aren't actually so in the working directory.
1655 they aren't actually so in the working directory.
1668 """
1656 """
1669 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1657 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1670 if clean:
1658 if clean:
1671 clean = [f for f in self._manifest if f not in self._changedset]
1659 clean = [f for f in self._manifest if f not in self._changedset]
1672 else:
1660 else:
1673 clean = []
1661 clean = []
1674 return scmutil.status([f for f in self._status.modified if match(f)],
1662 return scmutil.status([f for f in self._status.modified if match(f)],
1675 [f for f in self._status.added if match(f)],
1663 [f for f in self._status.added if match(f)],
1676 [f for f in self._status.removed if match(f)],
1664 [f for f in self._status.removed if match(f)],
1677 [], [], [], clean)
1665 [], [], [], clean)
1678
1666
1679 @propertycache
1667 @propertycache
1680 def _changedset(self):
1668 def _changedset(self):
1681 """Return the set of files changed in this context
1669 """Return the set of files changed in this context
1682 """
1670 """
1683 changed = set(self._status.modified)
1671 changed = set(self._status.modified)
1684 changed.update(self._status.added)
1672 changed.update(self._status.added)
1685 changed.update(self._status.removed)
1673 changed.update(self._status.removed)
1686 return changed
1674 return changed
1687
1675
1688 class memctx(committablectx):
1676 class memctx(committablectx):
1689 """Use memctx to perform in-memory commits via localrepo.commitctx().
1677 """Use memctx to perform in-memory commits via localrepo.commitctx().
1690
1678
1691 Revision information is supplied at initialization time, while
1679 Revision information is supplied at initialization time, while
1692 related file data is made available through a callback
1680 related file data is made available through a callback
1693 mechanism. 'repo' is the current localrepo, 'parents' is a
1681 mechanism. 'repo' is the current localrepo, 'parents' is a
1694 sequence of two parent revisions identifiers (pass None for every
1682 sequence of two parent revisions identifiers (pass None for every
1695 missing parent), 'text' is the commit message and 'files' lists
1683 missing parent), 'text' is the commit message and 'files' lists
1696 names of files touched by the revision (normalized and relative to
1684 names of files touched by the revision (normalized and relative to
1697 repository root).
1685 repository root).
1698
1686
1699 filectxfn(repo, memctx, path) is a callable receiving the
1687 filectxfn(repo, memctx, path) is a callable receiving the
1700 repository, the current memctx object and the normalized path of
1688 repository, the current memctx object and the normalized path of
1701 requested file, relative to repository root. It is fired by the
1689 requested file, relative to repository root. It is fired by the
1702 commit function for every file in 'files', but calls order is
1690 commit function for every file in 'files', but calls order is
1703 undefined. If the file is available in the revision being
1691 undefined. If the file is available in the revision being
1704 committed (updated or added), filectxfn returns a memfilectx
1692 committed (updated or added), filectxfn returns a memfilectx
1705 object. If the file was removed, filectxfn raises an
1693 object. If the file was removed, filectxfn raises an
1706 IOError. Moved files are represented by marking the source file
1694 IOError. Moved files are represented by marking the source file
1707 removed and the new file added with copy information (see
1695 removed and the new file added with copy information (see
1708 memfilectx).
1696 memfilectx).
1709
1697
1710 user receives the committer name and defaults to current
1698 user receives the committer name and defaults to current
1711 repository username, date is the commit date in any format
1699 repository username, date is the commit date in any format
1712 supported by util.parsedate() and defaults to current date, extra
1700 supported by util.parsedate() and defaults to current date, extra
1713 is a dictionary of metadata or is left empty.
1701 is a dictionary of metadata or is left empty.
1714 """
1702 """
1715
1703
1716 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1704 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1717 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1705 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1718 # this field to determine what to do in filectxfn.
1706 # this field to determine what to do in filectxfn.
1719 _returnnoneformissingfiles = True
1707 _returnnoneformissingfiles = True
1720
1708
1721 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1709 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1722 date=None, extra=None, editor=False):
1710 date=None, extra=None, editor=False):
1723 super(memctx, self).__init__(repo, text, user, date, extra)
1711 super(memctx, self).__init__(repo, text, user, date, extra)
1724 self._rev = None
1712 self._rev = None
1725 self._node = None
1713 self._node = None
1726 parents = [(p or nullid) for p in parents]
1714 parents = [(p or nullid) for p in parents]
1727 p1, p2 = parents
1715 p1, p2 = parents
1728 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1716 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1729 files = sorted(set(files))
1717 files = sorted(set(files))
1730 self._files = files
1718 self._files = files
1731 self.substate = {}
1719 self.substate = {}
1732
1720
1733 # if store is not callable, wrap it in a function
1721 # if store is not callable, wrap it in a function
1734 if not callable(filectxfn):
1722 if not callable(filectxfn):
1735 def getfilectx(repo, memctx, path):
1723 def getfilectx(repo, memctx, path):
1736 fctx = filectxfn[path]
1724 fctx = filectxfn[path]
1737 # this is weird but apparently we only keep track of one parent
1725 # this is weird but apparently we only keep track of one parent
1738 # (why not only store that instead of a tuple?)
1726 # (why not only store that instead of a tuple?)
1739 copied = fctx.renamed()
1727 copied = fctx.renamed()
1740 if copied:
1728 if copied:
1741 copied = copied[0]
1729 copied = copied[0]
1742 return memfilectx(repo, path, fctx.data(),
1730 return memfilectx(repo, path, fctx.data(),
1743 islink=fctx.islink(), isexec=fctx.isexec(),
1731 islink=fctx.islink(), isexec=fctx.isexec(),
1744 copied=copied, memctx=memctx)
1732 copied=copied, memctx=memctx)
1745 self._filectxfn = getfilectx
1733 self._filectxfn = getfilectx
1746 else:
1734 else:
1747 # "util.cachefunc" reduces invocation of possibly expensive
1735 # "util.cachefunc" reduces invocation of possibly expensive
1748 # "filectxfn" for performance (e.g. converting from another VCS)
1736 # "filectxfn" for performance (e.g. converting from another VCS)
1749 self._filectxfn = util.cachefunc(filectxfn)
1737 self._filectxfn = util.cachefunc(filectxfn)
1750
1738
1751 self._extra = extra and extra.copy() or {}
1739 self._extra = extra and extra.copy() or {}
1752 if self._extra.get('branch', '') == '':
1740 if self._extra.get('branch', '') == '':
1753 self._extra['branch'] = 'default'
1741 self._extra['branch'] = 'default'
1754
1742
1755 if editor:
1743 if editor:
1756 self._text = editor(self._repo, self, [])
1744 self._text = editor(self._repo, self, [])
1757 self._repo.savecommitmessage(self._text)
1745 self._repo.savecommitmessage(self._text)
1758
1746
1759 def filectx(self, path, filelog=None):
1747 def filectx(self, path, filelog=None):
1760 """get a file context from the working directory
1748 """get a file context from the working directory
1761
1749
1762 Returns None if file doesn't exist and should be removed."""
1750 Returns None if file doesn't exist and should be removed."""
1763 return self._filectxfn(self._repo, self, path)
1751 return self._filectxfn(self._repo, self, path)
1764
1752
1765 def commit(self):
1753 def commit(self):
1766 """commit context to the repo"""
1754 """commit context to the repo"""
1767 return self._repo.commitctx(self)
1755 return self._repo.commitctx(self)
1768
1756
1769 @propertycache
1757 @propertycache
1770 def _manifest(self):
1758 def _manifest(self):
1771 """generate a manifest based on the return values of filectxfn"""
1759 """generate a manifest based on the return values of filectxfn"""
1772
1760
1773 # keep this simple for now; just worry about p1
1761 # keep this simple for now; just worry about p1
1774 pctx = self._parents[0]
1762 pctx = self._parents[0]
1775 man = pctx.manifest().copy()
1763 man = pctx.manifest().copy()
1776
1764
1777 for f in self._status.modified:
1765 for f in self._status.modified:
1778 p1node = nullid
1766 p1node = nullid
1779 p2node = nullid
1767 p2node = nullid
1780 p = pctx[f].parents() # if file isn't in pctx, check p2?
1768 p = pctx[f].parents() # if file isn't in pctx, check p2?
1781 if len(p) > 0:
1769 if len(p) > 0:
1782 p1node = p[0].node()
1770 p1node = p[0].node()
1783 if len(p) > 1:
1771 if len(p) > 1:
1784 p2node = p[1].node()
1772 p2node = p[1].node()
1785 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1773 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1786
1774
1787 for f in self._status.added:
1775 for f in self._status.added:
1788 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1776 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1789
1777
1790 for f in self._status.removed:
1778 for f in self._status.removed:
1791 if f in man:
1779 if f in man:
1792 del man[f]
1780 del man[f]
1793
1781
1794 return man
1782 return man
1795
1783
1796 @propertycache
1784 @propertycache
1797 def _status(self):
1785 def _status(self):
1798 """Calculate exact status from ``files`` specified at construction
1786 """Calculate exact status from ``files`` specified at construction
1799 """
1787 """
1800 man1 = self.p1().manifest()
1788 man1 = self.p1().manifest()
1801 p2 = self._parents[1]
1789 p2 = self._parents[1]
1802 # "1 < len(self._parents)" can't be used for checking
1790 # "1 < len(self._parents)" can't be used for checking
1803 # existence of the 2nd parent, because "memctx._parents" is
1791 # existence of the 2nd parent, because "memctx._parents" is
1804 # explicitly initialized with a list whose length is 2.
1792 # explicitly initialized with a list whose length is 2.
1805 if p2.node() != nullid:
1793 if p2.node() != nullid:
1806 man2 = p2.manifest()
1794 man2 = p2.manifest()
1807 managing = lambda f: f in man1 or f in man2
1795 managing = lambda f: f in man1 or f in man2
1808 else:
1796 else:
1809 managing = lambda f: f in man1
1797 managing = lambda f: f in man1
1810
1798
1811 modified, added, removed = [], [], []
1799 modified, added, removed = [], [], []
1812 for f in self._files:
1800 for f in self._files:
1813 if not managing(f):
1801 if not managing(f):
1814 added.append(f)
1802 added.append(f)
1815 elif self[f]:
1803 elif self[f]:
1816 modified.append(f)
1804 modified.append(f)
1817 else:
1805 else:
1818 removed.append(f)
1806 removed.append(f)
1819
1807
1820 return scmutil.status(modified, added, removed, [], [], [], [])
1808 return scmutil.status(modified, added, removed, [], [], [], [])
1821
1809
1822 class memfilectx(committablefilectx):
1810 class memfilectx(committablefilectx):
1823 """memfilectx represents an in-memory file to commit.
1811 """memfilectx represents an in-memory file to commit.
1824
1812
1825 See memctx and committablefilectx for more details.
1813 See memctx and committablefilectx for more details.
1826 """
1814 """
1827 def __init__(self, repo, path, data, islink=False,
1815 def __init__(self, repo, path, data, islink=False,
1828 isexec=False, copied=None, memctx=None):
1816 isexec=False, copied=None, memctx=None):
1829 """
1817 """
1830 path is the normalized file path relative to repository root.
1818 path is the normalized file path relative to repository root.
1831 data is the file content as a string.
1819 data is the file content as a string.
1832 islink is True if the file is a symbolic link.
1820 islink is True if the file is a symbolic link.
1833 isexec is True if the file is executable.
1821 isexec is True if the file is executable.
1834 copied is the source file path if current file was copied in the
1822 copied is the source file path if current file was copied in the
1835 revision being committed, or None."""
1823 revision being committed, or None."""
1836 super(memfilectx, self).__init__(repo, path, None, memctx)
1824 super(memfilectx, self).__init__(repo, path, None, memctx)
1837 self._data = data
1825 self._data = data
1838 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1826 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1839 self._copied = None
1827 self._copied = None
1840 if copied:
1828 if copied:
1841 self._copied = (copied, nullid)
1829 self._copied = (copied, nullid)
1842
1830
1843 def data(self):
1831 def data(self):
1844 return self._data
1832 return self._data
1845 def size(self):
1833 def size(self):
1846 return len(self.data())
1834 return len(self.data())
1847 def flags(self):
1835 def flags(self):
1848 return self._flags
1836 return self._flags
1849 def renamed(self):
1837 def renamed(self):
1850 return self._copied
1838 return self._copied
1851
1839
1852 def remove(self, ignoremissing=False):
1840 def remove(self, ignoremissing=False):
1853 """wraps unlink for a repo's working directory"""
1841 """wraps unlink for a repo's working directory"""
1854 # need to figure out what to do here
1842 # need to figure out what to do here
1855 del self._changectx[self._path]
1843 del self._changectx[self._path]
1856
1844
1857 def write(self, data, flags):
1845 def write(self, data, flags):
1858 """wraps repo.wwrite"""
1846 """wraps repo.wwrite"""
1859 self._data = data
1847 self._data = data
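memctx and memfilectx together form the in-memory commit path described in the docstrings above. A hedged sketch of how an extension of this era might create a one-file commit with them, assuming an already-open localrepository object `repo`; the helper name commitonefile is made up, and only the constructor signatures shown in this file are relied on:

    # Minimal sketch, not a definitive recipe.
    from mercurial import context

    def commitonefile(repo, path, data, message, user=None):
        # Called back by commitctx() for every path in `files`; here there
        # is only one file and it is always present (never raises IOError).
        def getfilectx(repo, memctx, fpath):
            return context.memfilectx(repo, fpath, data)

        p1 = repo['.'].node()
        mctx = context.memctx(repo, [p1, None], message, [path],
                              getfilectx, user=user)
        return mctx.commit()   # hands the context to repo.commitctx()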