context: use manifest.diff() to compute most of status...
Augie Fackler - r23755:d43948a9 (default)
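The patch below replaces the hand-rolled manifest comparison in basectx._buildstatus with manifest.diff(). As the new loop uses it, the diff is a mapping from filename to a pair of (node, flag) tuples, one per side, with None standing in for a file that is absent on that side. The sketch that follows mirrors that shape with plain dicts to show how the status buckets fall out of it; diff_manifests() is an illustrative stand-in written for this note, not the real manifest API.

# Illustrative sketch only: plain dicts stand in for Mercurial's manifest
# objects, and diff_manifests() mimics the fn -> ((node1, flag1),
# (node2, flag2)) shape consumed by the new _buildstatus() loop.
def diff_manifests(m1, m2):
    d = {}
    for fn in set(m1) | set(m2):
        e1 = m1.get(fn, (None, ''))   # (node, flag); None means "absent"
        e2 = m2.get(fn, (None, ''))
        if e1 != e2:
            d[fn] = (e1, e2)
    return d

m1 = {'a': ('n1', ''), 'b': ('n2', ''), 'c': ('n3', 'x')}   # "parent" side
m2 = {'a': ('n1', ''), 'b': ('n4', ''), 'd': ('n5', '')}    # "child" side

added, removed, modified = [], [], []
for fn, ((node1, flag1), (node2, flag2)) in diff_manifests(m1, m2).items():
    if node1 is None:
        added.append(fn)        # only present on the second side
    elif node2 is None:
        removed.append(fn)      # only present on the first side
    else:
        modified.append(fn)     # present on both sides, node or flag changed

print(sorted(added), sorted(removed), sorted(modified))
# ['d'] ['c'] ['b']

The real loop additionally skips files already reported as deleted and falls back to a content comparison when node2 is the _newnode placeholder used for dirty working-copy files, as the hunk below shows.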
@@ -1,1849 +1,1860 @@
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import nullid, nullrev, short, hex, bin
from i18n import _
import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
import match as matchmod
import os, errno, stat
import obsolete as obsmod
import repoview
import fileset
import revlog

propertycache = util.propertycache

# Phony node value to stand in for new files in some uses of
# manifests. Manifests support 21-byte hashes for nodes which are
# dirty in the working copy.
_newnode = '!' * 21

def _adjustlinkrev(repo, path, filelog, fnode, srcrev, inclusive=False):
    """return the first ancestor of <srcrev> introducing <fnode>

    If the linkrev of the file revision does not point to an ancestor of
    srcrev, we'll walk down the ancestors until we find one introducing this
    file revision.

    :repo: a localrepository object (used to access changelog and manifest)
    :path: the file path
    :fnode: the nodeid of the file revision
    :filelog: the filelog of this path
    :srcrev: the changeset revision we search ancestors from
    :inclusive: if true, the src revision will also be checked
    """
    cl = repo.unfiltered().changelog
    ma = repo.manifest
    # fetch the linkrev
    fr = filelog.rev(fnode)
    lkr = filelog.linkrev(fr)
    # check if this linkrev is an ancestor of srcrev
    anc = cl.ancestors([srcrev], lkr, inclusive=inclusive)
    if lkr not in anc:
        for a in anc:
            ac = cl.read(a) # get changeset data (we avoid object creation).
            if path in ac[3]: # checking the 'files' field.
                # The file has been touched, check if the content is similar
                # to the one we search for.
                if fnode == ma.readdelta(ac[0]).get(path):
                    return a
        # In theory, we should never get out of that loop without a result.
        # But if the manifest uses a buggy file revision (not a child of the
        # one it replaces) we could. Such a buggy situation will likely result
        # in a crash somewhere else at some point.
    return lkr
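# ---------------------------------------------------------------------------
# Editorial aside (not part of context.py): a self-contained toy illustrating
# the "linkrev shadowing" problem _adjustlinkrev() works around. A filelog
# revision records a single linkrev -- the first changeset that introduced
# that exact content -- so a second branch committing identical content ends
# up sharing the filenode, and its linkrev can point outside that branch.
# The dict-based "filelog"/"changesets" below are illustrative stand-ins, not
# the real revlog structures.
toy_filelog = {'n1': 1}               # filenode -> recorded linkrev
toy_changesets = {
    # rev -> (first parent or None, {path: filenode touched in this rev})
    0: (None, {}),
    1: (0, {'f': 'n1'}),              # branch A introduces content n1
    2: (0, {'f': 'n1'}),              # branch B commits identical content
}

def toy_introrev(path, fnode, srcrev):
    """walk first parents from srcrev to find where fnode was introduced"""
    rev = srcrev
    while rev is not None:
        parent, touched = toy_changesets[rev]
        if touched.get(path) == fnode:
            return rev
        rev = parent
    return toy_filelog[fnode]         # fallback, as the real code does

print(toy_filelog['n1'], toy_introrev('f', 'n1', 2))
# 1 2 -> the stored linkrev says rev 1, but seen from rev 2 the content was
#        introduced by rev 2 itself.
# ---------------------------------------------------------------------------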

class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        for f in sorted(self._manifest):
            yield f

    def _manifestmatches(self, match, s):
        """generate a new manifest filtered by the match argument

        This method is for internal use only and mainly exists to provide an
        object oriented way for other contexts to customize the manifest
        generation.
        """
        return self.manifest().matches(match)

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        if self.rev() is not None and self.rev() < other.rev():
            self.manifest()
        mf1 = other._manifestmatches(match, s)
        mf2 = self._manifestmatches(match, s)

-        modified, added, clean = [], [], []
+        modified, added = [], []
+        removed = []
+        clean = set()
         deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
         deletedset = set(deleted)
-        withflags = mf1.withflags() | mf2.withflags()
-        for fn, mf2node in mf2.iteritems():
+        d = mf1.diff(mf2)
+        for fn, ((node1, flag1), (node2, flag2)) in d.iteritems():
             if fn in deletedset:
                 continue
-            if fn in mf1:
-                if ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
-                    (mf1[fn] != mf2node and
-                     (mf2node != _newnode or self[fn].cmp(other[fn])))):
-                    modified.append(fn)
-                elif listclean:
-                    clean.append(fn)
-                del mf1[fn]
+            if node1 is None:
+                added.append(fn)
+            elif node2 is None:
+                removed.append(fn)
+            elif node2 != _newnode:
+                # The file was not a new file in mf2, so an entry
+                # from diff is really a difference.
+                modified.append(fn)
+            elif self[fn].cmp(other[fn]):
+                # node2 was newnode, but the working file doesn't
+                # match the one in mf1.
+                modified.append(fn)
             else:
-                added.append(fn)
-        removed = mf1.keys()
+                clean.add(fn)
+        if listclean:
+            nondiff = (set(mf1) | set(mf2)) - set(d)
+            clean = list((clean | nondiff) - deletedset)
+        else:
+            clean = []
+
        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1]
            ignored = [fn for fn in ignored if fn not in mf1]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but its ancestors are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changeset.

        Troubles are returned as strings. Possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        if len(self._parents) == 2:
            return self._parents[1]
        return changectx(self._repo, -1)

    def _fileinfo(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        node, flag = self._repo.manifest.find(self._changeset[0], path)
        if not node:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path):
        return subrepo.subrepo(self, path)

    def match(self, pats=[], include=None, exclude=None, default='glob'):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.auditor, ctx=self)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    @propertycache
    def _dirs(self):
        return scmutil.dirs(self._manifest)

    def dirs(self):
        return self._dirs

    def dirty(self, missing=False, merge=True, branch=True):
        return False

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                rev2 = ctx2.subrev(subpath)
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self._repo.ui.status(_("skipping missing "
                                           "subrepository: %s\n") % subpath)

        for l in r:
            l.sort()

        return r
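# ---------------------------------------------------------------------------
# Editorial aside (not part of context.py): the "reversed" fixup in status()
# above in miniature. When the contexts are swapped to keep the fast
# working-directory-vs-parent path, "added" and "removed" trade places and
# deleted/unknown/ignored are dropped, since they only make sense relative to
# the working directory. Plain lists stand in for scmutil.status objects.
def reverse_status(modified, added, removed, clean):
    return (modified, removed, added, [], [], [], clean)

print(reverse_status(['changed.txt'], ['new.txt'], ['gone.txt'], ['same.txt']))
# (['changed.txt'], ['gone.txt'], ['new.txt'], [], [], [], ['same.txt'])
# ---------------------------------------------------------------------------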


def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None):
    def getfilectx(repo, memctx, path):
        data, mode, copied = store.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)
    extra = {}
    if branch:
        extra['branch'] = encoding.fromlocal(branch)
    ctx = memctx(repo, parents, text, files, getfilectx, user,
                 date, extra, editor)
    return ctx

class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == '.':
                self._node = repo.dirstate.p1()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                r = int(changeid)
                if str(r) != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                if len(changeid) == 20:
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            if repo.filtername == 'visible':
                msg = _("hidden revision '%s'") % changeid
                hint = _('use --hidden to access hidden revisions')
                raise error.FilteredRepoLookupError(msg, hint=hint)
            msg = _("filtered revision '%s' (not in '%s' subset)")
            msg %= (changeid, repo.filtername)
            raise error.FilteredRepoLookupError(msg)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    @propertycache
    def _changeset(self):
        return self._repo.changelog.read(self.rev())

    @propertycache
    def _manifest(self):
        return self._repo.manifest.read(self._changeset[0])

    @propertycache
    def _manifestdelta(self):
        return self._repo.manifest.readdelta(self._changeset[0])

    @propertycache
    def _parents(self):
        p = self._repo.changelog.parentrevs(self._rev)
        if p[1] == nullrev:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def changeset(self):
        return self._changeset
    def manifestnode(self):
        return self._changeset[0]

    def user(self):
        return self._changeset[1]
    def date(self):
        return self._changeset[2]
    def files(self):
        return self._changeset[3]
    def description(self):
        return self._changeset[4]
    def branch(self):
        return encoding.tolocal(self._changeset[5].get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset[5]
    def extra(self):
        return self._changeset[5]
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is a descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        fset = set(match.files())
        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        # avoid the entire walk if we're only looking for specific files
        if fset and not match.anypats():
            if util.all([fn in self for fn in fset]):
                for fn in sorted(fset):
                    if match(fn):
                        yield fn
                raise StopIteration

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn
        for fn in sorted(fset):
            if fn in self._dirs:
                # specified pattern is a directory
                continue
            match.bad(fn, _('no such file in rev %s') % self)

    def matches(self, match):
        return self.walk(match)

class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory."""
    def __new__(cls, repo, path, *args, **kwargs):
        return super(basefilectx, cls).__new__(cls)

    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if '_changeid' in self.__dict__:
            return self._changeid
        elif '_changectx' in self.__dict__:
            return self._changectx.rev()
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    def __str__(self):
        return "%s@%s" % (self.path(), self._changectx)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    def flags(self):
        return self._changectx.flags(self._path)
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return util.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def cmp(self, fctx):
        """compare with other file context

        returns True if different from fctx.
        """
        if (fctx._filerev is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True
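# ---------------------------------------------------------------------------
# Editorial aside (not part of context.py): the shape of the short-circuit in
# cmp() above. Reading and comparing file data is the expensive step, so it
# is only done when a cheap size comparison cannot already prove the files
# differ; the "size - 4" case allows for the 4-byte empty metadata block that
# filelog.size() may include. Plain values stand in for filelog/filectx
# objects, so this is a simplified model, not the real logic.
def toy_cmp(stored_size, stored_data, other_size, other_data):
    if stored_size == other_size or stored_size - 4 == other_size:
        return stored_data != other_data   # sizes compatible: compare content
    return True                            # sizes rule out equality

print(toy_cmp(5, b'hello', 5, b'hullo'))       # True  (same size, data differs)
print(toy_cmp(5, b'hello', 5, b'hello'))       # False (identical)
print(toy_cmp(12, b'x' * 12, 5, b'hello'))     # True  (sizes rule it out,
                                               #        data never compared)
# ---------------------------------------------------------------------------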

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account
        the changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return _adjustlinkrev(self._repo, self._path, self._filelog,
                              self._filenode, self.rev(), inclusive=True)

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid, pl is
            #   empty.
            # - In case of merge, only one of the parents is nullid and should
            #   be replaced with the rename information. This parent is
            #   -always- the first one.
            #
            # As nullid has always been filtered out in the previous list
            # comprehension, inserting at 0 will always result in replacing
            # the first nullid parent with the rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        ret = []
        for path, fnode, l in pl:
            if '_changeid' in vars(self) or '_changectx' in vars(self):
                # If self is associated with a changeset (probably explicitly
                # fed), ensure the created filectx is associated with a
                # changeset that is an ancestor of self.changectx.
                rev = _adjustlinkrev(self._repo, path, l, fnode, self.rev())
                fctx = filectx(self._repo, path, fileid=fnode, filelog=l,
                               changeid=rev)
            else:
                fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
            ret.append(fctx)
        return ret

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, linenumber=None, diffopts=None):
        '''returns a list of tuples of (ctx, line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed.
        This returns tuples of ((ctx, linenumber), line) for each line,
        if "linenumber" parameter is NOT "None".
        In such tuples, linenumber means one at the first appearance
        in the managed file.
        To reduce annotation cost,
        this returns a fixed value (False is used) as linenumber,
        if "linenumber" parameter is "False".'''

        if linenumber is None:
            def decorate(text, rev):
                return ([rev] * len(text.splitlines()), text)
        elif linenumber:
            def decorate(text, rev):
                size = len(text.splitlines())
                return ([(rev, i) for i in xrange(1, size + 1)], text)
        else:
            def decorate(text, rev):
                return ([(rev, False)] * len(text.splitlines()), text)

        def pair(parent, child):
            blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
                                     refine=True)
            for (a1, a2, b1, b2), t in blocks:
                # Changed blocks ('!') or blocks made only of blank lines ('~')
                # belong to the child.
                if t == '=':
                    child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = filectx(self._repo, self._path, filelog=self.filelog(),
                           fileid=self.filenode(), changeid=introrev)

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        visit = [base]
        hist = {}
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit[-1]
            pcached = f in pcache
            if not pcached:
                pcache[f] = parents(f)

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
                if not pcached:
                    needed[p] = needed.get(p, 0) + 1
            if ready:
                visit.pop()
                reusable = f in hist
                if reusable:
                    curr = hist[f]
                else:
                    curr = decorate(f.data(), f)
                for p in pl:
                    if not reusable:
                        curr = pair(hist[p], curr)
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                pcache[f] = []

        return zip(hist[base][0], hist[base][1].splitlines(True))
931
942
932 def ancestors(self, followfirst=False):
943 def ancestors(self, followfirst=False):
933 visit = {}
944 visit = {}
934 c = self
945 c = self
935 cut = followfirst and 1 or None
946 cut = followfirst and 1 or None
936 while True:
947 while True:
937 for parent in c.parents()[:cut]:
948 for parent in c.parents()[:cut]:
938 visit[(parent.rev(), parent.node())] = parent
949 visit[(parent.rev(), parent.node())] = parent
939 if not visit:
950 if not visit:
940 break
951 break
941 c = visit.pop(max(visit))
952 c = visit.pop(max(visit))
942 yield c
953 yield c
943
954
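# Editor's illustrative sketch (not part of the original module): how a caller
# might consume the annotate() and ancestors() generators defined above. The
# repository object, the 'tip' revision and the 'README' path are assumptions
# made purely for the example; the helper name is hypothetical.
def _examplefilehistory(repo):
    fctx = repo['tip']['README']
    # annotate() yields one (introducing entry, line text) pair per line
    for introducing, line in fctx.annotate(follow=True):
        pass
    # ancestors() walks earlier file revisions, newest first
    revs = [c.rev() for c in fctx.ancestors(followfirst=True)]
    return revs
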
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for a filtered revision. In such a case we fall back
            # to creating `changectx` on the unfiltered version of the
            # repository. This fallback should not be an issue because
            # `changectx` objects obtained from `filectx` are not used in
            # complex operations that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either, and "incorrect
            # behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious problems with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when the linkrev issues are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise util.Abort(_("censored node: %s") % short(self._filenode),
                             hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    def renamed(self):
        """check if the file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report the copy for the
        changeset only if the file revision's linkrev points back to the
        changeset in question or both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

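# Editor's illustrative sketch (not part of the original module): typical
# read-only use of filectx. The 'tip' revision and the 'README' path are
# assumptions for the example; the helper name is hypothetical.
def _examplereadfile(repo):
    fctx = repo['tip']['README']      # filectx for README at tip
    data = fctx.data()                # file contents; empty string if the
                                      # node is censored and censor.policy
                                      # is set to "ignore"
    copysource = fctx.renamed()       # (source path, filenode) or None
    return data, copysource
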
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get

        if len(self._parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = self._parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = self._parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parent, but appends an extra
        letter for changed files. Modified files get an extra 'm' while added
        files get an extra 'a'. This is used by manifest merging to see that
        files are different and by update logic to avoid deleting newly added
        files.
        """

        man1 = self._parents[0].manifest()
        man = man1.copy()
        if len(self._parents) > 1:
            man2 = self.p2().manifest()
            def getman(f):
                if f in man1:
                    return man1
                return man2
        else:
            getman = lambda f: man1

        copied = self._repo.dirstate.copies()
        ff = self._flagfunc
        for i, l in (("a", self._status.added), ("m", self._status.modified)):
            for f in l:
                orig = copied.get(f, f)
                man[f] = getman(orig).get(orig, nullid) + i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in self._status.deleted + self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    def subrev(self, subpath):
        return None

    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        t = []
        for p in self.parents():
            t.extend(p.tags())
        return t

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        self._repo.dirstate.beginparentchange()
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)
        self._repo.dirstate.endparentchange()

    def dirs(self):
        return self._repo.dirstate.dirs()

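# Editor's illustrative sketch (not part of the original module): the
# three-way flag-merge rule used by committablectx._buildflagfunc() above,
# written out as a standalone helper. The name is hypothetical; only the
# comparison logic mirrors the code above.
def _mergeflags(fl1, fl2, fla):
    # identical in both parents: keep it
    if fl1 == fl2:
        return fl1
    # changed on exactly one side relative to the ancestor: keep that side
    if fl1 == fla:
        return fl2
    if fl2 == fla:
        return fl1
    # changed on both sides: punt, exactly as _buildflagfunc does
    return ''
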
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty():
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        ui, ds = self._repo.ui, self._repo.dirstate
        try:
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, files, prefix=""):
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        try:
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected
        finally:
            wlock.release()

    def undelete(self, list):
        pctxs = self.parents()
        wlock = self._repo.wlock()
        try:
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % f)
                else:
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n") % dest)
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n") % dest)
        else:
            wlock = self._repo.wlock()
            try:
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], []

        modified = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)

        # update dirstate for files that are actually clean
        if fixup:
            try:
                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                wlock = self._repo.wlock(False)
                normal = self._repo.dirstate.normal
                try:
                    for f in fixup:
                        normal(f)
                finally:
                    wlock.release()
            except error.LockError:
                pass
        return modified, fixup

    def _manifestmatches(self, match, s):
        """Slow path for workingctx

        The fast path is when we compare the working directory to its parent,
        which means this function is comparing with a non-parent; therefore we
        need to build a manifest and return what matches.
        """
        mf = self._repo['.']._manifestmatches(match, s)
        for f in s.modified + s.added:
            mf[f] = _newnode
            mf.setflag(f, self.flags(f))
        for f in s.removed:
            if f in mf:
                del mf[f]
        return mf

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        if cmp:
            modified2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)

            # update dirstate for files that are actually clean
            if fixup and listclean:
                s.clean.extend(fixup)

        return s

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        elif match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in the
        workingctx case, since it applies only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, match)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and f not in other.dirs():
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

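# Editor's illustrative sketch (not part of the original module): the fast
# path above is taken when the working directory is compared against its
# parent; comparing against any other revision falls back to building a
# throwaway manifest via _manifestmatches(). The helper name is hypothetical.
def _examplestatus(repo):
    # fast path: working directory vs. its first parent (dirstate only)
    st1 = repo.status()
    # slower path: working directory vs. an arbitrary other revision
    st2 = repo.status(repo['.'].p1().node(), None)
    return st1, st2
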
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []

class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different from fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)

class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _buildstatus(self, other, s, match,
                     listignored, listclean, listunknown):
        """Prevent ``workingctx._buildstatus`` from changing ``self._status``
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        if other != self._repo['.']:
            # workingctx._buildstatus doesn't change self._status in this case
            superself = super(workingcommitctx, self)
            s = superself._buildstatus(other, s, match,
                                       listignored, listclean, listunknown)
        return s

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

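# Editor's illustrative sketch (not part of the original module): how a caller
# might use workingcommitctx so that only the files selected for commit are
# reported as changed. The helper name, the use of scmutil.matchfiles(), and
# the repo.status(match=...) call are assumptions made for the example.
def _examplecommitstatus(repo, files):
    changes = repo.status(match=scmutil.matchfiles(repo, files))
    ctx = workingcommitctx(repo, changes, text='partial commit')
    # paths outside 'files' are hidden by this context, per the docstring above
    return ctx.modified(), ctx.added(), ctx.removed()
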
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while related
    file data is made available through a callback mechanism. 'repo' is the
    current localrepo, 'parents' is a sequence of two parent revisions
    identifiers (pass None for every missing parent), 'text' is the commit
    message and 'files' lists names of files touched by the revision
    (normalized and relative to repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx
        else:
            # "util.cachefunc" reduces invocation of possibly expensive
            # "filectxfn" for performance (e.g. converting from another VCS)
            self._filectxfn = util.cachefunc(filectxfn)

        self._extra = extra and extra.copy() or {}
        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].node()
                if len(p) > 1:
                    p2node = p[1].node()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used to check for the existence
        # of the 2nd parent, because "memctx._parents" is explicitly
        # initialized with a list whose length is always 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data
    def size(self):
        return len(self.data())
    def flags(self):
        return self._flags
    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
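
# Editor's illustrative sketch (not part of the original module): how an
# extension might use memctx/memfilectx to commit a file without touching the
# working directory, following the callback mechanism described in the memctx
# docstring. The file name, content, commit text and user are assumptions;
# locking and transaction handling are omitted for brevity.
def _examplememcommit(repo):
    def getfilectx(repo, memctx, path):
        # return the in-memory content for every path listed in 'files'
        return memfilectx(repo, path, 'hello\n', islink=False,
                          isexec=False, copied=None, memctx=memctx)
    parents = [repo['tip'].node(), None]
    ctx = memctx(repo, parents, 'example commit', ['hello.txt'],
                 getfilectx, user='example <user@example.com>')
    return ctx.commit()  # returns the node of the new changeset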