context: use new manifest.diff(clean=True) support...
Augie Fackler
r23757:b5346480 default
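This revision switches _buildstatus below from computing clean files with set arithmetic after the loop to asking manifest.diff for them directly. As a rough sketch of the return convention the new code relies on (the dictionary shape is taken from the loop added below; the helper name and its standalone use here are illustrative only):

# Sketch only: how a diff produced with clean=True is consumed below.
# d maps path -> None for files identical in both manifests, and
# path -> ((node1, flag1), (node2, flag2)) for files that differ.
def splitdiff(d):
    added, removed, modified, clean = [], [], [], []
    for fn, value in d.iteritems():
        if value is None:           # identical in both manifests
            clean.append(fn)
            continue
        (node1, flag1), (node2, flag2) = value
        if node1 is None:           # no entry in the old manifest
            added.append(fn)
        elif node2 is None:         # no entry in the new manifest
            removed.append(fn)
        else:
            modified.append(fn)
    return added, removed, modified, clean

The removed code instead gathered clean files afterwards as (set(mf1) | set(mf2)) - set(d), minus the deleted set.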
@@ -1,1860 +1,1859
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, short, hex, bin
8 from node import nullid, nullrev, short, hex, bin
9 from i18n import _
9 from i18n import _
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 import match as matchmod
11 import match as matchmod
12 import os, errno, stat
12 import os, errno, stat
13 import obsolete as obsmod
13 import obsolete as obsmod
14 import repoview
14 import repoview
15 import fileset
15 import fileset
16 import revlog
16 import revlog
17
17
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19
19
20 # Phony node value to stand in for new files in some uses of
20 # Phony node value to stand in for new files in some uses of
21 # manifests. Manifests support 21-byte hashes for nodes which are
21 # manifests. Manifests support 21-byte hashes for nodes which are
22 # dirty in the working copy.
22 # dirty in the working copy.
23 _newnode = '!' * 21
23 _newnode = '!' * 21
24
24
25 def _adjustlinkrev(repo, path, filelog, fnode, srcrev, inclusive=False):
25 def _adjustlinkrev(repo, path, filelog, fnode, srcrev, inclusive=False):
26 """return the first ancestor of <srcrev> introducting <fnode>
26 """return the first ancestor of <srcrev> introducting <fnode>
27
27
28 If the linkrev of the file revision does not point to an ancestor of
28 If the linkrev of the file revision does not point to an ancestor of
29 srcrev, we'll walk down the ancestors until we find one introducing this
29 srcrev, we'll walk down the ancestors until we find one introducing this
30 file revision.
30 file revision.
31
31
32 :repo: a localrepository object (used to access changelog and manifest)
32 :repo: a localrepository object (used to access changelog and manifest)
33 :path: the file path
33 :path: the file path
34 :fnode: the nodeid of the file revision
34 :fnode: the nodeid of the file revision
35 :filelog: the filelog of this path
35 :filelog: the filelog of this path
36 :srcrev: the changeset revision we search ancestors from
36 :srcrev: the changeset revision we search ancestors from
37 :inclusive: if true, the src revision will also be checked
37 :inclusive: if true, the src revision will also be checked
38 """
38 """
39 cl = repo.unfiltered().changelog
39 cl = repo.unfiltered().changelog
40 ma = repo.manifest
40 ma = repo.manifest
41 # fetch the linkrev
41 # fetch the linkrev
42 fr = filelog.rev(fnode)
42 fr = filelog.rev(fnode)
43 lkr = filelog.linkrev(fr)
43 lkr = filelog.linkrev(fr)
44 # check if this linkrev is an ancestor of srcrev
44 # check if this linkrev is an ancestor of srcrev
45 anc = cl.ancestors([srcrev], lkr, inclusive=inclusive)
45 anc = cl.ancestors([srcrev], lkr, inclusive=inclusive)
46 if lkr not in anc:
46 if lkr not in anc:
47 for a in anc:
47 for a in anc:
48 ac = cl.read(a) # get changeset data (we avoid object creation).
48 ac = cl.read(a) # get changeset data (we avoid object creation).
49 if path in ac[3]: # checking the 'files' field.
49 if path in ac[3]: # checking the 'files' field.
50 # The file has been touched, check if the content is similar
50 # The file has been touched, check if the content is similar
51 # to the one we search for.
51 # to the one we search for.
52 if fnode == ma.readdelta(ac[0]).get(path):
52 if fnode == ma.readdelta(ac[0]).get(path):
53 return a
53 return a
54 # In theory, we should never get out of that loop without a result. But
54 # In theory, we should never get out of that loop without a result. But
55 # if the manifest uses a buggy file revision (not a child of the one it
55 # if the manifest uses a buggy file revision (not a child of the one it
56 # replaces) we could. Such a buggy situation will likely result in a crash
56 # replaces) we could. Such a buggy situation will likely result in a crash
57 # somewhere else at some point.
57 # somewhere else at some point.
58 return lkr
58 return lkr
59
59
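For reference, this is the call shape basefilectx.introrev() uses further down in this file; fctx here stands in for any existing filectx whose linkrev may point outside the ancestry of the changeset it was read from (illustrative only):

# Illustrative only: adjust a possibly-shadowed linkrev so the returned
# revision is an ancestor of (or, with inclusive=True, equal to) fctx.rev().
rev = _adjustlinkrev(fctx._repo, fctx._path, fctx._filelog,
                     fctx._filenode, fctx.rev(), inclusive=True)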
60 class basectx(object):
60 class basectx(object):
61 """A basectx object represents the common logic for its children:
61 """A basectx object represents the common logic for its children:
62 changectx: read-only context that is already present in the repo,
62 changectx: read-only context that is already present in the repo,
63 workingctx: a context that represents the working directory and can
63 workingctx: a context that represents the working directory and can
64 be committed,
64 be committed,
65 memctx: a context that represents changes in-memory and can also
65 memctx: a context that represents changes in-memory and can also
66 be committed."""
66 be committed."""
67 def __new__(cls, repo, changeid='', *args, **kwargs):
67 def __new__(cls, repo, changeid='', *args, **kwargs):
68 if isinstance(changeid, basectx):
68 if isinstance(changeid, basectx):
69 return changeid
69 return changeid
70
70
71 o = super(basectx, cls).__new__(cls)
71 o = super(basectx, cls).__new__(cls)
72
72
73 o._repo = repo
73 o._repo = repo
74 o._rev = nullrev
74 o._rev = nullrev
75 o._node = nullid
75 o._node = nullid
76
76
77 return o
77 return o
78
78
79 def __str__(self):
79 def __str__(self):
80 return short(self.node())
80 return short(self.node())
81
81
82 def __int__(self):
82 def __int__(self):
83 return self.rev()
83 return self.rev()
84
84
85 def __repr__(self):
85 def __repr__(self):
86 return "<%s %s>" % (type(self).__name__, str(self))
86 return "<%s %s>" % (type(self).__name__, str(self))
87
87
88 def __eq__(self, other):
88 def __eq__(self, other):
89 try:
89 try:
90 return type(self) == type(other) and self._rev == other._rev
90 return type(self) == type(other) and self._rev == other._rev
91 except AttributeError:
91 except AttributeError:
92 return False
92 return False
93
93
94 def __ne__(self, other):
94 def __ne__(self, other):
95 return not (self == other)
95 return not (self == other)
96
96
97 def __contains__(self, key):
97 def __contains__(self, key):
98 return key in self._manifest
98 return key in self._manifest
99
99
100 def __getitem__(self, key):
100 def __getitem__(self, key):
101 return self.filectx(key)
101 return self.filectx(key)
102
102
103 def __iter__(self):
103 def __iter__(self):
104 for f in sorted(self._manifest):
104 for f in sorted(self._manifest):
105 yield f
105 yield f
106
106
107 def _manifestmatches(self, match, s):
107 def _manifestmatches(self, match, s):
108 """generate a new manifest filtered by the match argument
108 """generate a new manifest filtered by the match argument
109
109
110 This method is for internal use only and mainly exists to provide an
110 This method is for internal use only and mainly exists to provide an
111 object oriented way for other contexts to customize the manifest
111 object oriented way for other contexts to customize the manifest
112 generation.
112 generation.
113 """
113 """
114 return self.manifest().matches(match)
114 return self.manifest().matches(match)
115
115
116 def _matchstatus(self, other, match):
116 def _matchstatus(self, other, match):
117 """return match.always if match is none
117 """return match.always if match is none
118
118
119 This internal method provides a way for child objects to override the
119 This internal method provides a way for child objects to override the
120 match operator.
120 match operator.
121 """
121 """
122 return match or matchmod.always(self._repo.root, self._repo.getcwd())
122 return match or matchmod.always(self._repo.root, self._repo.getcwd())
123
123
124 def _buildstatus(self, other, s, match, listignored, listclean,
124 def _buildstatus(self, other, s, match, listignored, listclean,
125 listunknown):
125 listunknown):
126 """build a status with respect to another context"""
126 """build a status with respect to another context"""
127 # Load earliest manifest first for caching reasons. More specifically,
127 # Load earliest manifest first for caching reasons. More specifically,
128 # if you have revisions 1000 and 1001, 1001 is probably stored as a
128 # if you have revisions 1000 and 1001, 1001 is probably stored as a
129 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
129 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
130 # 1000 and cache it so that when you read 1001, we just need to apply a
130 # 1000 and cache it so that when you read 1001, we just need to apply a
131 # delta to what's in the cache. So that's one full reconstruction + one
131 # delta to what's in the cache. So that's one full reconstruction + one
132 # delta application.
132 # delta application.
133 if self.rev() is not None and self.rev() < other.rev():
133 if self.rev() is not None and self.rev() < other.rev():
134 self.manifest()
134 self.manifest()
135 mf1 = other._manifestmatches(match, s)
135 mf1 = other._manifestmatches(match, s)
136 mf2 = self._manifestmatches(match, s)
136 mf2 = self._manifestmatches(match, s)
137
137
138 modified, added = [], []
138 modified, added = [], []
139 removed = []
139 removed = []
140 clean = set()
140 clean = []
141 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
141 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
142 deletedset = set(deleted)
142 deletedset = set(deleted)
143 d = mf1.diff(mf2)
143 d = mf1.diff(mf2, clean=listclean)
144 for fn, ((node1, flag1), (node2, flag2)) in d.iteritems():
144 for fn, value in d.iteritems():
145 if fn in deletedset:
145 if fn in deletedset:
146 continue
146 continue
147 if value is None:
148 clean.append(fn)
149 continue
150 (node1, flag1), (node2, flag2) = value
147 if node1 is None:
151 if node1 is None:
148 added.append(fn)
152 added.append(fn)
149 elif node2 is None:
153 elif node2 is None:
150 removed.append(fn)
154 removed.append(fn)
151 elif node2 != _newnode:
155 elif node2 != _newnode:
152 # The file was not a new file in mf2, so an entry
156 # The file was not a new file in mf2, so an entry
153 # from diff is really a difference.
157 # from diff is really a difference.
154 modified.append(fn)
158 modified.append(fn)
155 elif self[fn].cmp(other[fn]):
159 elif self[fn].cmp(other[fn]):
156 # node2 was newnode, but the working file doesn't
160 # node2 was newnode, but the working file doesn't
157 # match the one in mf1.
161 # match the one in mf1.
158 modified.append(fn)
162 modified.append(fn)
159 else:
163 else:
160 clean.add(fn)
164 clean.append(fn)
161 if listclean:
162 nondiff = (set(mf1) | set(mf2)) - set(d)
163 clean = list((clean | nondiff) - deletedset)
164 else:
165 clean = []
166
165
167 if removed:
166 if removed:
168 # need to filter files if they are already reported as removed
167 # need to filter files if they are already reported as removed
169 unknown = [fn for fn in unknown if fn not in mf1]
168 unknown = [fn for fn in unknown if fn not in mf1]
170 ignored = [fn for fn in ignored if fn not in mf1]
169 ignored = [fn for fn in ignored if fn not in mf1]
171 # if they're deleted, don't report them as removed
170 # if they're deleted, don't report them as removed
172 removed = [fn for fn in removed if fn not in deletedset]
171 removed = [fn for fn in removed if fn not in deletedset]
173
172
174 return scmutil.status(modified, added, removed, deleted, unknown,
173 return scmutil.status(modified, added, removed, deleted, unknown,
175 ignored, clean)
174 ignored, clean)
176
175
177 @propertycache
176 @propertycache
178 def substate(self):
177 def substate(self):
179 return subrepo.state(self, self._repo.ui)
178 return subrepo.state(self, self._repo.ui)
180
179
181 def subrev(self, subpath):
180 def subrev(self, subpath):
182 return self.substate[subpath][1]
181 return self.substate[subpath][1]
183
182
184 def rev(self):
183 def rev(self):
185 return self._rev
184 return self._rev
186 def node(self):
185 def node(self):
187 return self._node
186 return self._node
188 def hex(self):
187 def hex(self):
189 return hex(self.node())
188 return hex(self.node())
190 def manifest(self):
189 def manifest(self):
191 return self._manifest
190 return self._manifest
192 def phasestr(self):
191 def phasestr(self):
193 return phases.phasenames[self.phase()]
192 return phases.phasenames[self.phase()]
194 def mutable(self):
193 def mutable(self):
195 return self.phase() > phases.public
194 return self.phase() > phases.public
196
195
197 def getfileset(self, expr):
196 def getfileset(self, expr):
198 return fileset.getfileset(self, expr)
197 return fileset.getfileset(self, expr)
199
198
200 def obsolete(self):
199 def obsolete(self):
201 """True if the changeset is obsolete"""
200 """True if the changeset is obsolete"""
202 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
201 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
203
202
204 def extinct(self):
203 def extinct(self):
205 """True if the changeset is extinct"""
204 """True if the changeset is extinct"""
206 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
205 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
207
206
208 def unstable(self):
207 def unstable(self):
209 """True if the changeset is not obsolete but it's ancestor are"""
208 """True if the changeset is not obsolete but it's ancestor are"""
210 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
209 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
211
210
212 def bumped(self):
211 def bumped(self):
213 """True if the changeset try to be a successor of a public changeset
212 """True if the changeset try to be a successor of a public changeset
214
213
215 Only non-public and non-obsolete changesets may be bumped.
214 Only non-public and non-obsolete changesets may be bumped.
216 """
215 """
217 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
216 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
218
217
219 def divergent(self):
218 def divergent(self):
220 """Is a successors of a changeset with multiple possible successors set
219 """Is a successors of a changeset with multiple possible successors set
221
220
222 Only non-public and non-obsolete changesets may be divergent.
221 Only non-public and non-obsolete changesets may be divergent.
223 """
222 """
224 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
223 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
225
224
226 def troubled(self):
225 def troubled(self):
227 """True if the changeset is either unstable, bumped or divergent"""
226 """True if the changeset is either unstable, bumped or divergent"""
228 return self.unstable() or self.bumped() or self.divergent()
227 return self.unstable() or self.bumped() or self.divergent()
229
228
230 def troubles(self):
229 def troubles(self):
231 """return the list of troubles affecting this changesets.
230 """return the list of troubles affecting this changesets.
232
231
233 Troubles are returned as strings. Possible values are:
232 Troubles are returned as strings. Possible values are:
234 - unstable,
233 - unstable,
235 - bumped,
234 - bumped,
236 - divergent.
235 - divergent.
237 """
236 """
238 troubles = []
237 troubles = []
239 if self.unstable():
238 if self.unstable():
240 troubles.append('unstable')
239 troubles.append('unstable')
241 if self.bumped():
240 if self.bumped():
242 troubles.append('bumped')
241 troubles.append('bumped')
243 if self.divergent():
242 if self.divergent():
244 troubles.append('divergent')
243 troubles.append('divergent')
245 return troubles
244 return troubles
246
245
247 def parents(self):
246 def parents(self):
248 """return contexts for each parent changeset"""
247 """return contexts for each parent changeset"""
249 return self._parents
248 return self._parents
250
249
251 def p1(self):
250 def p1(self):
252 return self._parents[0]
251 return self._parents[0]
253
252
254 def p2(self):
253 def p2(self):
255 if len(self._parents) == 2:
254 if len(self._parents) == 2:
256 return self._parents[1]
255 return self._parents[1]
257 return changectx(self._repo, -1)
256 return changectx(self._repo, -1)
258
257
259 def _fileinfo(self, path):
258 def _fileinfo(self, path):
260 if '_manifest' in self.__dict__:
259 if '_manifest' in self.__dict__:
261 try:
260 try:
262 return self._manifest[path], self._manifest.flags(path)
261 return self._manifest[path], self._manifest.flags(path)
263 except KeyError:
262 except KeyError:
264 raise error.ManifestLookupError(self._node, path,
263 raise error.ManifestLookupError(self._node, path,
265 _('not found in manifest'))
264 _('not found in manifest'))
266 if '_manifestdelta' in self.__dict__ or path in self.files():
265 if '_manifestdelta' in self.__dict__ or path in self.files():
267 if path in self._manifestdelta:
266 if path in self._manifestdelta:
268 return (self._manifestdelta[path],
267 return (self._manifestdelta[path],
269 self._manifestdelta.flags(path))
268 self._manifestdelta.flags(path))
270 node, flag = self._repo.manifest.find(self._changeset[0], path)
269 node, flag = self._repo.manifest.find(self._changeset[0], path)
271 if not node:
270 if not node:
272 raise error.ManifestLookupError(self._node, path,
271 raise error.ManifestLookupError(self._node, path,
273 _('not found in manifest'))
272 _('not found in manifest'))
274
273
275 return node, flag
274 return node, flag
276
275
277 def filenode(self, path):
276 def filenode(self, path):
278 return self._fileinfo(path)[0]
277 return self._fileinfo(path)[0]
279
278
280 def flags(self, path):
279 def flags(self, path):
281 try:
280 try:
282 return self._fileinfo(path)[1]
281 return self._fileinfo(path)[1]
283 except error.LookupError:
282 except error.LookupError:
284 return ''
283 return ''
285
284
286 def sub(self, path):
285 def sub(self, path):
287 return subrepo.subrepo(self, path)
286 return subrepo.subrepo(self, path)
288
287
289 def match(self, pats=[], include=None, exclude=None, default='glob'):
288 def match(self, pats=[], include=None, exclude=None, default='glob'):
290 r = self._repo
289 r = self._repo
291 return matchmod.match(r.root, r.getcwd(), pats,
290 return matchmod.match(r.root, r.getcwd(), pats,
292 include, exclude, default,
291 include, exclude, default,
293 auditor=r.auditor, ctx=self)
292 auditor=r.auditor, ctx=self)
294
293
295 def diff(self, ctx2=None, match=None, **opts):
294 def diff(self, ctx2=None, match=None, **opts):
296 """Returns a diff generator for the given contexts and matcher"""
295 """Returns a diff generator for the given contexts and matcher"""
297 if ctx2 is None:
296 if ctx2 is None:
298 ctx2 = self.p1()
297 ctx2 = self.p1()
299 if ctx2 is not None:
298 if ctx2 is not None:
300 ctx2 = self._repo[ctx2]
299 ctx2 = self._repo[ctx2]
301 diffopts = patch.diffopts(self._repo.ui, opts)
300 diffopts = patch.diffopts(self._repo.ui, opts)
302 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
301 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
303
302
304 @propertycache
303 @propertycache
305 def _dirs(self):
304 def _dirs(self):
306 return scmutil.dirs(self._manifest)
305 return scmutil.dirs(self._manifest)
307
306
308 def dirs(self):
307 def dirs(self):
309 return self._dirs
308 return self._dirs
310
309
311 def dirty(self, missing=False, merge=True, branch=True):
310 def dirty(self, missing=False, merge=True, branch=True):
312 return False
311 return False
313
312
314 def status(self, other=None, match=None, listignored=False,
313 def status(self, other=None, match=None, listignored=False,
315 listclean=False, listunknown=False, listsubrepos=False):
314 listclean=False, listunknown=False, listsubrepos=False):
316 """return status of files between two nodes or node and working
315 """return status of files between two nodes or node and working
317 directory.
316 directory.
318
317
319 If other is None, compare this node with working directory.
318 If other is None, compare this node with working directory.
320
319
321 returns (modified, added, removed, deleted, unknown, ignored, clean)
320 returns (modified, added, removed, deleted, unknown, ignored, clean)
322 """
321 """
323
322
324 ctx1 = self
323 ctx1 = self
325 ctx2 = self._repo[other]
324 ctx2 = self._repo[other]
326
325
327 # This next code block is, admittedly, fragile logic that tests for
326 # This next code block is, admittedly, fragile logic that tests for
328 # reversing the contexts and wouldn't need to exist if it weren't for
327 # reversing the contexts and wouldn't need to exist if it weren't for
329 # the fast (and common) code path of comparing the working directory
328 # the fast (and common) code path of comparing the working directory
330 # with its first parent.
329 # with its first parent.
331 #
330 #
332 # What we're aiming for here is the ability to call:
331 # What we're aiming for here is the ability to call:
333 #
332 #
334 # workingctx.status(parentctx)
333 # workingctx.status(parentctx)
335 #
334 #
336 # If we always built the manifest for each context and compared those,
335 # If we always built the manifest for each context and compared those,
337 # then we'd be done. But the special case of the above call means we
336 # then we'd be done. But the special case of the above call means we
338 # just copy the manifest of the parent.
337 # just copy the manifest of the parent.
339 reversed = False
338 reversed = False
340 if (not isinstance(ctx1, changectx)
339 if (not isinstance(ctx1, changectx)
341 and isinstance(ctx2, changectx)):
340 and isinstance(ctx2, changectx)):
342 reversed = True
341 reversed = True
343 ctx1, ctx2 = ctx2, ctx1
342 ctx1, ctx2 = ctx2, ctx1
344
343
345 match = ctx2._matchstatus(ctx1, match)
344 match = ctx2._matchstatus(ctx1, match)
346 r = scmutil.status([], [], [], [], [], [], [])
345 r = scmutil.status([], [], [], [], [], [], [])
347 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
346 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
348 listunknown)
347 listunknown)
349
348
350 if reversed:
349 if reversed:
351 # Reverse added and removed. Clear deleted, unknown and ignored as
350 # Reverse added and removed. Clear deleted, unknown and ignored as
352 # these make no sense to reverse.
351 # these make no sense to reverse.
353 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
352 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
354 r.clean)
353 r.clean)
355
354
356 if listsubrepos:
355 if listsubrepos:
357 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
356 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
358 rev2 = ctx2.subrev(subpath)
357 rev2 = ctx2.subrev(subpath)
359 try:
358 try:
360 submatch = matchmod.narrowmatcher(subpath, match)
359 submatch = matchmod.narrowmatcher(subpath, match)
361 s = sub.status(rev2, match=submatch, ignored=listignored,
360 s = sub.status(rev2, match=submatch, ignored=listignored,
362 clean=listclean, unknown=listunknown,
361 clean=listclean, unknown=listunknown,
363 listsubrepos=True)
362 listsubrepos=True)
364 for rfiles, sfiles in zip(r, s):
363 for rfiles, sfiles in zip(r, s):
365 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
364 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
366 except error.LookupError:
365 except error.LookupError:
367 self._repo.ui.status(_("skipping missing "
366 self._repo.ui.status(_("skipping missing "
368 "subrepository: %s\n") % subpath)
367 "subrepository: %s\n") % subpath)
369
368
370 for l in r:
369 for l in r:
371 l.sort()
370 l.sort()
372
371
373 return r
372 return r
374
373
375
374
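A minimal usage sketch of the status() method above, assuming an already-open localrepository object named repo; the attribute names come from the scmutil.status tuple returned here:

# Sketch only: compare '.' with the working directory (other=None) and
# unpack the scmutil.status result built by _buildstatus().
st = repo['.'].status(listclean=True, listunknown=True)
touched = st.modified + st.added + st.removed
untracked = st.unknown            # only populated when listunknown=True
unchanged = st.clean              # only populated when listclean=True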
376 def makememctx(repo, parents, text, user, date, branch, files, store,
375 def makememctx(repo, parents, text, user, date, branch, files, store,
377 editor=None):
376 editor=None):
378 def getfilectx(repo, memctx, path):
377 def getfilectx(repo, memctx, path):
379 data, mode, copied = store.getfile(path)
378 data, mode, copied = store.getfile(path)
380 if data is None:
379 if data is None:
381 return None
380 return None
382 islink, isexec = mode
381 islink, isexec = mode
383 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
382 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
384 copied=copied, memctx=memctx)
383 copied=copied, memctx=memctx)
385 extra = {}
384 extra = {}
386 if branch:
385 if branch:
387 extra['branch'] = encoding.fromlocal(branch)
386 extra['branch'] = encoding.fromlocal(branch)
388 ctx = memctx(repo, parents, text, files, getfilectx, user,
387 ctx = memctx(repo, parents, text, files, getfilectx, user,
389 date, extra, editor)
388 date, extra, editor)
390 return ctx
389 return ctx
391
390
392 class changectx(basectx):
391 class changectx(basectx):
393 """A changecontext object makes access to data related to a particular
392 """A changecontext object makes access to data related to a particular
394 changeset convenient. It represents a read-only context already present in
393 changeset convenient. It represents a read-only context already present in
395 the repo."""
394 the repo."""
396 def __init__(self, repo, changeid=''):
395 def __init__(self, repo, changeid=''):
397 """changeid is a revision number, node, or tag"""
396 """changeid is a revision number, node, or tag"""
398
397
399 # since basectx.__new__ already took care of copying the object, we
398 # since basectx.__new__ already took care of copying the object, we
400 # don't need to do anything in __init__, so we just exit here
399 # don't need to do anything in __init__, so we just exit here
401 if isinstance(changeid, basectx):
400 if isinstance(changeid, basectx):
402 return
401 return
403
402
404 if changeid == '':
403 if changeid == '':
405 changeid = '.'
404 changeid = '.'
406 self._repo = repo
405 self._repo = repo
407
406
408 try:
407 try:
409 if isinstance(changeid, int):
408 if isinstance(changeid, int):
410 self._node = repo.changelog.node(changeid)
409 self._node = repo.changelog.node(changeid)
411 self._rev = changeid
410 self._rev = changeid
412 return
411 return
413 if isinstance(changeid, long):
412 if isinstance(changeid, long):
414 changeid = str(changeid)
413 changeid = str(changeid)
415 if changeid == '.':
414 if changeid == '.':
416 self._node = repo.dirstate.p1()
415 self._node = repo.dirstate.p1()
417 self._rev = repo.changelog.rev(self._node)
416 self._rev = repo.changelog.rev(self._node)
418 return
417 return
419 if changeid == 'null':
418 if changeid == 'null':
420 self._node = nullid
419 self._node = nullid
421 self._rev = nullrev
420 self._rev = nullrev
422 return
421 return
423 if changeid == 'tip':
422 if changeid == 'tip':
424 self._node = repo.changelog.tip()
423 self._node = repo.changelog.tip()
425 self._rev = repo.changelog.rev(self._node)
424 self._rev = repo.changelog.rev(self._node)
426 return
425 return
427 if len(changeid) == 20:
426 if len(changeid) == 20:
428 try:
427 try:
429 self._node = changeid
428 self._node = changeid
430 self._rev = repo.changelog.rev(changeid)
429 self._rev = repo.changelog.rev(changeid)
431 return
430 return
432 except error.FilteredRepoLookupError:
431 except error.FilteredRepoLookupError:
433 raise
432 raise
434 except LookupError:
433 except LookupError:
435 pass
434 pass
436
435
437 try:
436 try:
438 r = int(changeid)
437 r = int(changeid)
439 if str(r) != changeid:
438 if str(r) != changeid:
440 raise ValueError
439 raise ValueError
441 l = len(repo.changelog)
440 l = len(repo.changelog)
442 if r < 0:
441 if r < 0:
443 r += l
442 r += l
444 if r < 0 or r >= l:
443 if r < 0 or r >= l:
445 raise ValueError
444 raise ValueError
446 self._rev = r
445 self._rev = r
447 self._node = repo.changelog.node(r)
446 self._node = repo.changelog.node(r)
448 return
447 return
449 except error.FilteredIndexError:
448 except error.FilteredIndexError:
450 raise
449 raise
451 except (ValueError, OverflowError, IndexError):
450 except (ValueError, OverflowError, IndexError):
452 pass
451 pass
453
452
454 if len(changeid) == 40:
453 if len(changeid) == 40:
455 try:
454 try:
456 self._node = bin(changeid)
455 self._node = bin(changeid)
457 self._rev = repo.changelog.rev(self._node)
456 self._rev = repo.changelog.rev(self._node)
458 return
457 return
459 except error.FilteredLookupError:
458 except error.FilteredLookupError:
460 raise
459 raise
461 except (TypeError, LookupError):
460 except (TypeError, LookupError):
462 pass
461 pass
463
462
464 # lookup bookmarks through the name interface
463 # lookup bookmarks through the name interface
465 try:
464 try:
466 self._node = repo.names.singlenode(repo, changeid)
465 self._node = repo.names.singlenode(repo, changeid)
467 self._rev = repo.changelog.rev(self._node)
466 self._rev = repo.changelog.rev(self._node)
468 return
467 return
469 except KeyError:
468 except KeyError:
470 pass
469 pass
471 except error.FilteredRepoLookupError:
470 except error.FilteredRepoLookupError:
472 raise
471 raise
473 except error.RepoLookupError:
472 except error.RepoLookupError:
474 pass
473 pass
475
474
476 self._node = repo.unfiltered().changelog._partialmatch(changeid)
475 self._node = repo.unfiltered().changelog._partialmatch(changeid)
477 if self._node is not None:
476 if self._node is not None:
478 self._rev = repo.changelog.rev(self._node)
477 self._rev = repo.changelog.rev(self._node)
479 return
478 return
480
479
481 # lookup failed
480 # lookup failed
482 # check if it might have come from damaged dirstate
481 # check if it might have come from damaged dirstate
483 #
482 #
484 # XXX we could avoid the unfiltered if we had a recognizable
483 # XXX we could avoid the unfiltered if we had a recognizable
485 # exception for filtered changeset access
484 # exception for filtered changeset access
486 if changeid in repo.unfiltered().dirstate.parents():
485 if changeid in repo.unfiltered().dirstate.parents():
487 msg = _("working directory has unknown parent '%s'!")
486 msg = _("working directory has unknown parent '%s'!")
488 raise error.Abort(msg % short(changeid))
487 raise error.Abort(msg % short(changeid))
489 try:
488 try:
490 if len(changeid) == 20:
489 if len(changeid) == 20:
491 changeid = hex(changeid)
490 changeid = hex(changeid)
492 except TypeError:
491 except TypeError:
493 pass
492 pass
494 except (error.FilteredIndexError, error.FilteredLookupError,
493 except (error.FilteredIndexError, error.FilteredLookupError,
495 error.FilteredRepoLookupError):
494 error.FilteredRepoLookupError):
496 if repo.filtername == 'visible':
495 if repo.filtername == 'visible':
497 msg = _("hidden revision '%s'") % changeid
496 msg = _("hidden revision '%s'") % changeid
498 hint = _('use --hidden to access hidden revisions')
497 hint = _('use --hidden to access hidden revisions')
499 raise error.FilteredRepoLookupError(msg, hint=hint)
498 raise error.FilteredRepoLookupError(msg, hint=hint)
500 msg = _("filtered revision '%s' (not in '%s' subset)")
499 msg = _("filtered revision '%s' (not in '%s' subset)")
501 msg %= (changeid, repo.filtername)
500 msg %= (changeid, repo.filtername)
502 raise error.FilteredRepoLookupError(msg)
501 raise error.FilteredRepoLookupError(msg)
503 except IndexError:
502 except IndexError:
504 pass
503 pass
505 raise error.RepoLookupError(
504 raise error.RepoLookupError(
506 _("unknown revision '%s'") % changeid)
505 _("unknown revision '%s'") % changeid)
507
506
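The constructor above accepts several changeid forms; the sketch below exercises one lookup branch per line, assuming repo is an open repository and that the concrete values (which are placeholders) resolve unambiguously:

# Illustrative only: changeid forms resolved by changectx.__init__.
def lookup_examples(repo):
    return [
        repo[0],           # integer revision number
        repo['.'],         # first parent of the working directory (the default)
        repo['null'],      # the null changeset
        repo['tip'],       # repository tip
        repo['b5346480'],  # unambiguous hex prefix (placeholder value)
        repo['stable'],    # bookmark/tag/branch via the names interface (placeholder)
    ]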
508 def __hash__(self):
507 def __hash__(self):
509 try:
508 try:
510 return hash(self._rev)
509 return hash(self._rev)
511 except AttributeError:
510 except AttributeError:
512 return id(self)
511 return id(self)
513
512
514 def __nonzero__(self):
513 def __nonzero__(self):
515 return self._rev != nullrev
514 return self._rev != nullrev
516
515
517 @propertycache
516 @propertycache
518 def _changeset(self):
517 def _changeset(self):
519 return self._repo.changelog.read(self.rev())
518 return self._repo.changelog.read(self.rev())
520
519
521 @propertycache
520 @propertycache
522 def _manifest(self):
521 def _manifest(self):
523 return self._repo.manifest.read(self._changeset[0])
522 return self._repo.manifest.read(self._changeset[0])
524
523
525 @propertycache
524 @propertycache
526 def _manifestdelta(self):
525 def _manifestdelta(self):
527 return self._repo.manifest.readdelta(self._changeset[0])
526 return self._repo.manifest.readdelta(self._changeset[0])
528
527
529 @propertycache
528 @propertycache
530 def _parents(self):
529 def _parents(self):
531 p = self._repo.changelog.parentrevs(self._rev)
530 p = self._repo.changelog.parentrevs(self._rev)
532 if p[1] == nullrev:
531 if p[1] == nullrev:
533 p = p[:-1]
532 p = p[:-1]
534 return [changectx(self._repo, x) for x in p]
533 return [changectx(self._repo, x) for x in p]
535
534
536 def changeset(self):
535 def changeset(self):
537 return self._changeset
536 return self._changeset
538 def manifestnode(self):
537 def manifestnode(self):
539 return self._changeset[0]
538 return self._changeset[0]
540
539
541 def user(self):
540 def user(self):
542 return self._changeset[1]
541 return self._changeset[1]
543 def date(self):
542 def date(self):
544 return self._changeset[2]
543 return self._changeset[2]
545 def files(self):
544 def files(self):
546 return self._changeset[3]
545 return self._changeset[3]
547 def description(self):
546 def description(self):
548 return self._changeset[4]
547 return self._changeset[4]
549 def branch(self):
548 def branch(self):
550 return encoding.tolocal(self._changeset[5].get("branch"))
549 return encoding.tolocal(self._changeset[5].get("branch"))
551 def closesbranch(self):
550 def closesbranch(self):
552 return 'close' in self._changeset[5]
551 return 'close' in self._changeset[5]
553 def extra(self):
552 def extra(self):
554 return self._changeset[5]
553 return self._changeset[5]
555 def tags(self):
554 def tags(self):
556 return self._repo.nodetags(self._node)
555 return self._repo.nodetags(self._node)
557 def bookmarks(self):
556 def bookmarks(self):
558 return self._repo.nodebookmarks(self._node)
557 return self._repo.nodebookmarks(self._node)
559 def phase(self):
558 def phase(self):
560 return self._repo._phasecache.phase(self._repo, self._rev)
559 return self._repo._phasecache.phase(self._repo, self._rev)
561 def hidden(self):
560 def hidden(self):
562 return self._rev in repoview.filterrevs(self._repo, 'visible')
561 return self._rev in repoview.filterrevs(self._repo, 'visible')
563
562
564 def children(self):
563 def children(self):
565 """return contexts for each child changeset"""
564 """return contexts for each child changeset"""
566 c = self._repo.changelog.children(self._node)
565 c = self._repo.changelog.children(self._node)
567 return [changectx(self._repo, x) for x in c]
566 return [changectx(self._repo, x) for x in c]
568
567
569 def ancestors(self):
568 def ancestors(self):
570 for a in self._repo.changelog.ancestors([self._rev]):
569 for a in self._repo.changelog.ancestors([self._rev]):
571 yield changectx(self._repo, a)
570 yield changectx(self._repo, a)
572
571
573 def descendants(self):
572 def descendants(self):
574 for d in self._repo.changelog.descendants([self._rev]):
573 for d in self._repo.changelog.descendants([self._rev]):
575 yield changectx(self._repo, d)
574 yield changectx(self._repo, d)
576
575
577 def filectx(self, path, fileid=None, filelog=None):
576 def filectx(self, path, fileid=None, filelog=None):
578 """get a file context from this changeset"""
577 """get a file context from this changeset"""
579 if fileid is None:
578 if fileid is None:
580 fileid = self.filenode(path)
579 fileid = self.filenode(path)
581 return filectx(self._repo, path, fileid=fileid,
580 return filectx(self._repo, path, fileid=fileid,
582 changectx=self, filelog=filelog)
581 changectx=self, filelog=filelog)
583
582
584 def ancestor(self, c2, warn=False):
583 def ancestor(self, c2, warn=False):
585 """return the "best" ancestor context of self and c2
584 """return the "best" ancestor context of self and c2
586
585
587 If there are multiple candidates, it will show a message and check
586 If there are multiple candidates, it will show a message and check
588 merge.preferancestor configuration before falling back to the
587 merge.preferancestor configuration before falling back to the
589 revlog ancestor."""
588 revlog ancestor."""
590 # deal with workingctxs
589 # deal with workingctxs
591 n2 = c2._node
590 n2 = c2._node
592 if n2 is None:
591 if n2 is None:
593 n2 = c2._parents[0]._node
592 n2 = c2._parents[0]._node
594 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
593 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
595 if not cahs:
594 if not cahs:
596 anc = nullid
595 anc = nullid
597 elif len(cahs) == 1:
596 elif len(cahs) == 1:
598 anc = cahs[0]
597 anc = cahs[0]
599 else:
598 else:
600 for r in self._repo.ui.configlist('merge', 'preferancestor'):
599 for r in self._repo.ui.configlist('merge', 'preferancestor'):
601 try:
600 try:
602 ctx = changectx(self._repo, r)
601 ctx = changectx(self._repo, r)
603 except error.RepoLookupError:
602 except error.RepoLookupError:
604 continue
603 continue
605 anc = ctx.node()
604 anc = ctx.node()
606 if anc in cahs:
605 if anc in cahs:
607 break
606 break
608 else:
607 else:
609 anc = self._repo.changelog.ancestor(self._node, n2)
608 anc = self._repo.changelog.ancestor(self._node, n2)
610 if warn:
609 if warn:
611 self._repo.ui.status(
610 self._repo.ui.status(
612 (_("note: using %s as ancestor of %s and %s\n") %
611 (_("note: using %s as ancestor of %s and %s\n") %
613 (short(anc), short(self._node), short(n2))) +
612 (short(anc), short(self._node), short(n2))) +
614 ''.join(_(" alternatively, use --config "
613 ''.join(_(" alternatively, use --config "
615 "merge.preferancestor=%s\n") %
614 "merge.preferancestor=%s\n") %
616 short(n) for n in sorted(cahs) if n != anc))
615 short(n) for n in sorted(cahs) if n != anc))
617 return changectx(self._repo, anc)
616 return changectx(self._repo, anc)
618
617
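When the note above is printed, there is more than one common ancestor head and the hint suggests pinning one. A sketch of doing that programmatically, reusing the section and option names from the configlist() call above (the revision value is a placeholder):

# Illustrative only: prefer a specific ancestor for the next merge.
# Equivalent on the command line: --config merge.preferancestor=<rev>
repo.ui.setconfig('merge', 'preferancestor', 'b5346480')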
619 def descendant(self, other):
618 def descendant(self, other):
620 """True if other is descendant of this changeset"""
619 """True if other is descendant of this changeset"""
621 return self._repo.changelog.descendant(self._rev, other._rev)
620 return self._repo.changelog.descendant(self._rev, other._rev)
622
621
623 def walk(self, match):
622 def walk(self, match):
624 fset = set(match.files())
623 fset = set(match.files())
625 # for dirstate.walk, files=['.'] means "walk the whole tree".
624 # for dirstate.walk, files=['.'] means "walk the whole tree".
626 # follow that here, too
625 # follow that here, too
627 fset.discard('.')
626 fset.discard('.')
628
627
629 # avoid the entire walk if we're only looking for specific files
628 # avoid the entire walk if we're only looking for specific files
630 if fset and not match.anypats():
629 if fset and not match.anypats():
631 if util.all([fn in self for fn in fset]):
630 if util.all([fn in self for fn in fset]):
632 for fn in sorted(fset):
631 for fn in sorted(fset):
633 if match(fn):
632 if match(fn):
634 yield fn
633 yield fn
635 raise StopIteration
634 raise StopIteration
636
635
637 for fn in self:
636 for fn in self:
638 if fn in fset:
637 if fn in fset:
639 # specified pattern is the exact name
638 # specified pattern is the exact name
640 fset.remove(fn)
639 fset.remove(fn)
641 if match(fn):
640 if match(fn):
642 yield fn
641 yield fn
643 for fn in sorted(fset):
642 for fn in sorted(fset):
644 if fn in self._dirs:
643 if fn in self._dirs:
645 # specified pattern is a directory
644 # specified pattern is a directory
646 continue
645 continue
647 match.bad(fn, _('no such file in rev %s') % self)
646 match.bad(fn, _('no such file in rev %s') % self)
648
647
649 def matches(self, match):
648 def matches(self, match):
650 return self.walk(match)
649 return self.walk(match)
651
650
652 class basefilectx(object):
651 class basefilectx(object):
653 """A filecontext object represents the common logic for its children:
652 """A filecontext object represents the common logic for its children:
654 filectx: read-only access to a filerevision that is already present
653 filectx: read-only access to a filerevision that is already present
655 in the repo,
654 in the repo,
656 workingfilectx: a filecontext that represents files from the working
655 workingfilectx: a filecontext that represents files from the working
657 directory,
656 directory,
658 memfilectx: a filecontext that represents files in-memory."""
657 memfilectx: a filecontext that represents files in-memory."""
659 def __new__(cls, repo, path, *args, **kwargs):
658 def __new__(cls, repo, path, *args, **kwargs):
660 return super(basefilectx, cls).__new__(cls)
659 return super(basefilectx, cls).__new__(cls)
661
660
662 @propertycache
661 @propertycache
663 def _filelog(self):
662 def _filelog(self):
664 return self._repo.file(self._path)
663 return self._repo.file(self._path)
665
664
666 @propertycache
665 @propertycache
667 def _changeid(self):
666 def _changeid(self):
668 if '_changeid' in self.__dict__:
667 if '_changeid' in self.__dict__:
669 return self._changeid
668 return self._changeid
670 elif '_changectx' in self.__dict__:
669 elif '_changectx' in self.__dict__:
671 return self._changectx.rev()
670 return self._changectx.rev()
672 else:
671 else:
673 return self._filelog.linkrev(self._filerev)
672 return self._filelog.linkrev(self._filerev)
674
673
675 @propertycache
674 @propertycache
676 def _filenode(self):
675 def _filenode(self):
677 if '_fileid' in self.__dict__:
676 if '_fileid' in self.__dict__:
678 return self._filelog.lookup(self._fileid)
677 return self._filelog.lookup(self._fileid)
679 else:
678 else:
680 return self._changectx.filenode(self._path)
679 return self._changectx.filenode(self._path)
681
680
682 @propertycache
681 @propertycache
683 def _filerev(self):
682 def _filerev(self):
684 return self._filelog.rev(self._filenode)
683 return self._filelog.rev(self._filenode)
685
684
686 @propertycache
685 @propertycache
687 def _repopath(self):
686 def _repopath(self):
688 return self._path
687 return self._path
689
688
690 def __nonzero__(self):
689 def __nonzero__(self):
691 try:
690 try:
692 self._filenode
691 self._filenode
693 return True
692 return True
694 except error.LookupError:
693 except error.LookupError:
695 # file is missing
694 # file is missing
696 return False
695 return False
697
696
698 def __str__(self):
697 def __str__(self):
699 return "%s@%s" % (self.path(), self._changectx)
698 return "%s@%s" % (self.path(), self._changectx)
700
699
701 def __repr__(self):
700 def __repr__(self):
702 return "<%s %s>" % (type(self).__name__, str(self))
701 return "<%s %s>" % (type(self).__name__, str(self))
703
702
704 def __hash__(self):
703 def __hash__(self):
705 try:
704 try:
706 return hash((self._path, self._filenode))
705 return hash((self._path, self._filenode))
707 except AttributeError:
706 except AttributeError:
708 return id(self)
707 return id(self)
709
708
710 def __eq__(self, other):
709 def __eq__(self, other):
711 try:
710 try:
712 return (type(self) == type(other) and self._path == other._path
711 return (type(self) == type(other) and self._path == other._path
713 and self._filenode == other._filenode)
712 and self._filenode == other._filenode)
714 except AttributeError:
713 except AttributeError:
715 return False
714 return False
716
715
717 def __ne__(self, other):
716 def __ne__(self, other):
718 return not (self == other)
717 return not (self == other)
719
718
720 def filerev(self):
719 def filerev(self):
721 return self._filerev
720 return self._filerev
722 def filenode(self):
721 def filenode(self):
723 return self._filenode
722 return self._filenode
724 def flags(self):
723 def flags(self):
725 return self._changectx.flags(self._path)
724 return self._changectx.flags(self._path)
726 def filelog(self):
725 def filelog(self):
727 return self._filelog
726 return self._filelog
728 def rev(self):
727 def rev(self):
729 return self._changeid
728 return self._changeid
730 def linkrev(self):
729 def linkrev(self):
731 return self._filelog.linkrev(self._filerev)
730 return self._filelog.linkrev(self._filerev)
732 def node(self):
731 def node(self):
733 return self._changectx.node()
732 return self._changectx.node()
734 def hex(self):
733 def hex(self):
735 return self._changectx.hex()
734 return self._changectx.hex()
736 def user(self):
735 def user(self):
737 return self._changectx.user()
736 return self._changectx.user()
738 def date(self):
737 def date(self):
739 return self._changectx.date()
738 return self._changectx.date()
740 def files(self):
739 def files(self):
741 return self._changectx.files()
740 return self._changectx.files()
742 def description(self):
741 def description(self):
743 return self._changectx.description()
742 return self._changectx.description()
744 def branch(self):
743 def branch(self):
745 return self._changectx.branch()
744 return self._changectx.branch()
746 def extra(self):
745 def extra(self):
747 return self._changectx.extra()
746 return self._changectx.extra()
748 def phase(self):
747 def phase(self):
749 return self._changectx.phase()
748 return self._changectx.phase()
750 def phasestr(self):
749 def phasestr(self):
751 return self._changectx.phasestr()
750 return self._changectx.phasestr()
752 def manifest(self):
751 def manifest(self):
753 return self._changectx.manifest()
752 return self._changectx.manifest()
754 def changectx(self):
753 def changectx(self):
755 return self._changectx
754 return self._changectx
756
755
757 def path(self):
756 def path(self):
758 return self._path
757 return self._path
759
758
760 def isbinary(self):
759 def isbinary(self):
761 try:
760 try:
762 return util.binary(self.data())
761 return util.binary(self.data())
763 except IOError:
762 except IOError:
764 return False
763 return False
765 def isexec(self):
764 def isexec(self):
766 return 'x' in self.flags()
765 return 'x' in self.flags()
767 def islink(self):
766 def islink(self):
768 return 'l' in self.flags()
767 return 'l' in self.flags()
769
768
770 def cmp(self, fctx):
769 def cmp(self, fctx):
771 """compare with other file context
770 """compare with other file context
772
771
773 returns True if different than fctx.
772 returns True if different than fctx.
774 """
773 """
775 if (fctx._filerev is None
774 if (fctx._filerev is None
776 and (self._repo._encodefilterpats
775 and (self._repo._encodefilterpats
777 # if file data starts with '\1\n', empty metadata block is
776 # if file data starts with '\1\n', empty metadata block is
778 # prepended, which adds 4 bytes to filelog.size().
777 # prepended, which adds 4 bytes to filelog.size().
779 or self.size() - 4 == fctx.size())
778 or self.size() - 4 == fctx.size())
780 or self.size() == fctx.size()):
779 or self.size() == fctx.size()):
781 return self._filelog.cmp(self._filenode, fctx.data())
780 return self._filelog.cmp(self._filenode, fctx.data())
782
781
783 return True
782 return True
784
783
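The 4-byte allowance in cmp() above comes from filelog's metadata framing: when the file's own data happens to start with '\1\n', an empty metadata block is prepended so the stored text stays unambiguous. A small sketch of that accounting (the framing shown is an assumption based on the comment above):

# Sketch only: an empty metadata block is '\1\n' + '\1\n', i.e. 4 bytes,
# so filelog.size() reports len(data) + 4 for such files.
data = '\1\nunlucky file content that merely looks like metadata\n'
stored = '\1\n' + '\1\n' + data
assert len(stored) - len(data) == 4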
785 def introrev(self):
784 def introrev(self):
786 """return the rev of the changeset which introduced this file revision
785 """return the rev of the changeset which introduced this file revision
787
786
788 This method is different from linkrev because it takes into account the
787 This method is different from linkrev because it takes into account the
789 changeset the filectx was created from. It ensures the returned
788 changeset the filectx was created from. It ensures the returned
790 revision is one of its ancestors. This prevents bugs from
789 revision is one of its ancestors. This prevents bugs from
791 'linkrev-shadowing' when a file revision is used by multiple
790 'linkrev-shadowing' when a file revision is used by multiple
792 changesets.
791 changesets.
793 """
792 """
794 lkr = self.linkrev()
793 lkr = self.linkrev()
795 attrs = vars(self)
794 attrs = vars(self)
796 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
795 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
797 if noctx or self.rev() == lkr:
796 if noctx or self.rev() == lkr:
798 return self.linkrev()
797 return self.linkrev()
799 return _adjustlinkrev(self._repo, self._path, self._filelog,
798 return _adjustlinkrev(self._repo, self._path, self._filelog,
800 self._filenode, self.rev(), inclusive=True)
799 self._filenode, self.rev(), inclusive=True)
801
800
802 def parents(self):
801 def parents(self):
803 _path = self._path
802 _path = self._path
804 fl = self._filelog
803 fl = self._filelog
805 parents = self._filelog.parents(self._filenode)
804 parents = self._filelog.parents(self._filenode)
806 pl = [(_path, node, fl) for node in parents if node != nullid]
805 pl = [(_path, node, fl) for node in parents if node != nullid]
807
806
808 r = fl.renamed(self._filenode)
807 r = fl.renamed(self._filenode)
809 if r:
808 if r:
810 # - In the simple rename case, both parents are nullid, pl is empty.
809 # - In the simple rename case, both parents are nullid, pl is empty.
811 # - In case of merge, only one of the parents is nullid and should
810 # - In case of merge, only one of the parents is nullid and should
812 # be replaced with the rename information. This parent is -always-
811 # be replaced with the rename information. This parent is -always-
813 # the first one.
812 # the first one.
814 #
813 #
815 # As nullid has always been filtered out in the previous list
814 # As nullid has always been filtered out in the previous list
816 # comprehension, inserting at 0 will always result in replacing the
815 # comprehension, inserting at 0 will always result in replacing the
817 # first nullid parent with the rename information.
816 # first nullid parent with the rename information.
818 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
817 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
819
818
820 ret = []
819 ret = []
821 for path, fnode, l in pl:
820 for path, fnode, l in pl:
822 if '_changeid' in vars(self) or '_changectx' in vars(self):
821 if '_changeid' in vars(self) or '_changectx' in vars(self):
823 # If self is associated with a changeset (probably explicitly
822 # If self is associated with a changeset (probably explicitly
824 # fed), ensure the created filectx is associated with a
823 # fed), ensure the created filectx is associated with a
825 # changeset that is an ancestor of self.changectx.
824 # changeset that is an ancestor of self.changectx.
826 rev = _adjustlinkrev(self._repo, path, l, fnode, self.rev())
825 rev = _adjustlinkrev(self._repo, path, l, fnode, self.rev())
827 fctx = filectx(self._repo, path, fileid=fnode, filelog=l,
826 fctx = filectx(self._repo, path, fileid=fnode, filelog=l,
828 changeid=rev)
827 changeid=rev)
829 else:
828 else:
830 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
829 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
831 ret.append(fctx)
830 ret.append(fctx)
832 return ret
831 return ret
833
832
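A sketch of the parent-list handling in parents() above: filelog.renamed() yields either False or a (source path, source filenode) pair, and when it is set it takes the place of the first nullid parent. The helper below is illustrative only and mirrors the code above:

# Illustrative only: how a rename source is spliced into the parent list.
def with_rename_source(repo, fl, filenode, pl):
    r = fl.renamed(filenode)        # False, or (source path, source filenode)
    if r:
        pl.insert(0, (r[0], r[1], repo.file(r[0])))
    return pl                       # entries are (path, filenode, filelog)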
834 def p1(self):
833 def p1(self):
835 return self.parents()[0]
834 return self.parents()[0]
836
835
837 def p2(self):
836 def p2(self):
838 p = self.parents()
837 p = self.parents()
839 if len(p) == 2:
838 if len(p) == 2:
840 return p[1]
839 return p[1]
841 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
840 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
842
841
843 def annotate(self, follow=False, linenumber=None, diffopts=None):
842 def annotate(self, follow=False, linenumber=None, diffopts=None):
844 '''returns a list of tuples of (ctx, line) for each line
843 '''returns a list of tuples of (ctx, line) for each line
845 in the file, where ctx is the filectx of the node where
844 in the file, where ctx is the filectx of the node where
846 that line was last changed.
845 that line was last changed.
847 This returns tuples of ((ctx, linenumber), line) for each line,
846 This returns tuples of ((ctx, linenumber), line) for each line,
848 if "linenumber" parameter is NOT "None".
847 if "linenumber" parameter is NOT "None".
849 In such tuples, the line number refers to the line's first
848 In such tuples, the line number refers to the line's first
850 appearance in the managed file.
849 appearance in the managed file.
851 To reduce annotation cost,
850 To reduce annotation cost,
852 this returns the fixed value False as the line number
851 this returns the fixed value False as the line number
853 if the "linenumber" parameter is "False".'''
852 if the "linenumber" parameter is "False".'''
854
853
855 if linenumber is None:
854 if linenumber is None:
856 def decorate(text, rev):
855 def decorate(text, rev):
857 return ([rev] * len(text.splitlines()), text)
856 return ([rev] * len(text.splitlines()), text)
858 elif linenumber:
857 elif linenumber:
859 def decorate(text, rev):
858 def decorate(text, rev):
860 size = len(text.splitlines())
859 size = len(text.splitlines())
861 return ([(rev, i) for i in xrange(1, size + 1)], text)
860 return ([(rev, i) for i in xrange(1, size + 1)], text)
862 else:
861 else:
863 def decorate(text, rev):
862 def decorate(text, rev):
864 return ([(rev, False)] * len(text.splitlines()), text)
863 return ([(rev, False)] * len(text.splitlines()), text)
865
864
866 def pair(parent, child):
865 def pair(parent, child):
867 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
866 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
868 refine=True)
867 refine=True)
869 for (a1, a2, b1, b2), t in blocks:
868 for (a1, a2, b1, b2), t in blocks:
870 # Changed blocks ('!') or blocks made only of blank lines ('~')
869 # Changed blocks ('!') or blocks made only of blank lines ('~')
871 # belong to the child.
870 # belong to the child.
872 if t == '=':
871 if t == '=':
873 child[0][b1:b2] = parent[0][a1:a2]
872 child[0][b1:b2] = parent[0][a1:a2]
874 return child
873 return child
875
874
876 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
875 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
877
876
878 def parents(f):
877 def parents(f):
879 pl = f.parents()
878 pl = f.parents()
880
879
881 # Don't return renamed parents if we aren't following.
880 # Don't return renamed parents if we aren't following.
882 if not follow:
881 if not follow:
883 pl = [p for p in pl if p.path() == f.path()]
882 pl = [p for p in pl if p.path() == f.path()]
884
883
885 # renamed filectx won't have a filelog yet, so set it
884 # renamed filectx won't have a filelog yet, so set it
886 # from the cache to save time
885 # from the cache to save time
887 for p in pl:
886 for p in pl:
888 if not '_filelog' in p.__dict__:
887 if not '_filelog' in p.__dict__:
889 p._filelog = getlog(p.path())
888 p._filelog = getlog(p.path())
890
889
891 return pl
890 return pl
892
891
893 # use linkrev to find the first changeset where self appeared
892 # use linkrev to find the first changeset where self appeared
894 base = self
893 base = self
895 introrev = self.introrev()
894 introrev = self.introrev()
896 if self.rev() != introrev:
895 if self.rev() != introrev:
897 base = filectx(self._repo, self._path, filelog=self.filelog(),
896 base = filectx(self._repo, self._path, filelog=self.filelog(),
898 fileid=self.filenode(), changeid=introrev)
897 fileid=self.filenode(), changeid=introrev)
899
898
900 # This algorithm would prefer to be recursive, but Python is a
899 # This algorithm would prefer to be recursive, but Python is a
901 # bit recursion-hostile. Instead we do an iterative
900 # bit recursion-hostile. Instead we do an iterative
902 # depth-first search.
901 # depth-first search.
903
902
904 visit = [base]
903 visit = [base]
905 hist = {}
904 hist = {}
906 pcache = {}
905 pcache = {}
907 needed = {base: 1}
906 needed = {base: 1}
908 while visit:
907 while visit:
909 f = visit[-1]
908 f = visit[-1]
910 pcached = f in pcache
909 pcached = f in pcache
911 if not pcached:
910 if not pcached:
912 pcache[f] = parents(f)
911 pcache[f] = parents(f)
913
912
914 ready = True
913 ready = True
915 pl = pcache[f]
914 pl = pcache[f]
916 for p in pl:
915 for p in pl:
917 if p not in hist:
916 if p not in hist:
918 ready = False
917 ready = False
919 visit.append(p)
918 visit.append(p)
920 if not pcached:
919 if not pcached:
921 needed[p] = needed.get(p, 0) + 1
920 needed[p] = needed.get(p, 0) + 1
922 if ready:
921 if ready:
923 visit.pop()
922 visit.pop()
924 reusable = f in hist
923 reusable = f in hist
925 if reusable:
924 if reusable:
926 curr = hist[f]
925 curr = hist[f]
927 else:
926 else:
928 curr = decorate(f.data(), f)
927 curr = decorate(f.data(), f)
929 for p in pl:
928 for p in pl:
930 if not reusable:
929 if not reusable:
931 curr = pair(hist[p], curr)
930 curr = pair(hist[p], curr)
932 if needed[p] == 1:
931 if needed[p] == 1:
933 del hist[p]
932 del hist[p]
934 del needed[p]
933 del needed[p]
935 else:
934 else:
936 needed[p] -= 1
935 needed[p] -= 1
937
936
938 hist[f] = curr
937 hist[f] = curr
939 pcache[f] = []
938 pcache[f] = []
940
939
941 return zip(hist[base][0], hist[base][1].splitlines(True))
940 return zip(hist[base][0], hist[base][1].splitlines(True))
942
941
943 def ancestors(self, followfirst=False):
942 def ancestors(self, followfirst=False):
944 visit = {}
943 visit = {}
945 c = self
944 c = self
946 cut = followfirst and 1 or None
945 cut = followfirst and 1 or None
947 while True:
946 while True:
948 for parent in c.parents()[:cut]:
947 for parent in c.parents()[:cut]:
949 visit[(parent.rev(), parent.node())] = parent
948 visit[(parent.rev(), parent.node())] = parent
950 if not visit:
949 if not visit:
951 break
950 break
952 c = visit.pop(max(visit))
951 c = visit.pop(max(visit))
953 yield c
952 yield c
954
953
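# Editor's sketch (not part of context.py): walking a file's history through
# ancestors() above, assuming 'fctx' is a filectx for some tracked file.
for anc in fctx.ancestors():
    print('%d: %s' % (anc.rev(), anc.path()))   # highest (rev, node) popped first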
955 class filectx(basefilectx):
954 class filectx(basefilectx):
956 """A filecontext object makes access to data related to a particular
955 """A filecontext object makes access to data related to a particular
957 filerevision convenient."""
956 filerevision convenient."""
958 def __init__(self, repo, path, changeid=None, fileid=None,
957 def __init__(self, repo, path, changeid=None, fileid=None,
959 filelog=None, changectx=None):
958 filelog=None, changectx=None):
960 """changeid can be a changeset revision, node, or tag.
959 """changeid can be a changeset revision, node, or tag.
961 fileid can be a file revision or node."""
960 fileid can be a file revision or node."""
962 self._repo = repo
961 self._repo = repo
963 self._path = path
962 self._path = path
964
963
965 assert (changeid is not None
964 assert (changeid is not None
966 or fileid is not None
965 or fileid is not None
967 or changectx is not None), \
966 or changectx is not None), \
968 ("bad args: changeid=%r, fileid=%r, changectx=%r"
967 ("bad args: changeid=%r, fileid=%r, changectx=%r"
969 % (changeid, fileid, changectx))
968 % (changeid, fileid, changectx))
970
969
971 if filelog is not None:
970 if filelog is not None:
972 self._filelog = filelog
971 self._filelog = filelog
973
972
974 if changeid is not None:
973 if changeid is not None:
975 self._changeid = changeid
974 self._changeid = changeid
976 if changectx is not None:
975 if changectx is not None:
977 self._changectx = changectx
976 self._changectx = changectx
978 if fileid is not None:
977 if fileid is not None:
979 self._fileid = fileid
978 self._fileid = fileid
980
979
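# Editor's sketch (not part of context.py): typical ways a filectx is obtained,
# assuming 'repo' is an open localrepo that tracks 'foo.py'.
fctx = repo['tip']['foo.py']      # via a changectx lookup
older = fctx.filectx(0)           # same filelog, file revision 0 (see filectx())
data = older.data()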
981 @propertycache
980 @propertycache
982 def _changectx(self):
981 def _changectx(self):
983 try:
982 try:
984 return changectx(self._repo, self._changeid)
983 return changectx(self._repo, self._changeid)
985 except error.FilteredRepoLookupError:
984 except error.FilteredRepoLookupError:
986 # Linkrev may point to any revision in the repository. When the
985 # Linkrev may point to any revision in the repository. When the
987 # repository is filtered this may lead to `filectx` trying to build
986 # repository is filtered this may lead to `filectx` trying to build
988 # `changectx` for a filtered revision. In such a case we fall back to
987 # `changectx` for a filtered revision. In such a case we fall back to
989 # creating `changectx` on the unfiltered version of the repository.
988 # creating `changectx` on the unfiltered version of the repository.
990 # This fallback should not be an issue because `changectx` from
989 # This fallback should not be an issue because `changectx` from
991 # `filectx` are not used in complex operations that care about
990 # `filectx` are not used in complex operations that care about
992 # filtering.
991 # filtering.
993 #
992 #
994 # This fallback is a cheap and dirty fix that prevents several
993 # This fallback is a cheap and dirty fix that prevents several
995 # crashes. It does not ensure the behavior is correct. However the
994 # crashes. It does not ensure the behavior is correct. However the
996 # behavior was not correct before filtering either and "incorrect
995 # behavior was not correct before filtering either and "incorrect
997 # behavior" is seen as better as "crash"
996 # behavior" is seen as better as "crash"
998 #
997 #
999 # Linkrevs have several serious troubles with filtering that are
998 # Linkrevs have several serious troubles with filtering that are
1000 # complicated to solve. Proper handling of the issue here should be
999 # complicated to solve. Proper handling of the issue here should be
1001 # considered when solving the linkrev issues is on the table.
1000 # considered when solving the linkrev issues is on the table.
1002 return changectx(self._repo.unfiltered(), self._changeid)
1001 return changectx(self._repo.unfiltered(), self._changeid)
1003
1002
1004 def filectx(self, fileid):
1003 def filectx(self, fileid):
1005 '''opens an arbitrary revision of the file without
1004 '''opens an arbitrary revision of the file without
1006 opening a new filelog'''
1005 opening a new filelog'''
1007 return filectx(self._repo, self._path, fileid=fileid,
1006 return filectx(self._repo, self._path, fileid=fileid,
1008 filelog=self._filelog)
1007 filelog=self._filelog)
1009
1008
1010 def data(self):
1009 def data(self):
1011 try:
1010 try:
1012 return self._filelog.read(self._filenode)
1011 return self._filelog.read(self._filenode)
1013 except error.CensoredNodeError:
1012 except error.CensoredNodeError:
1014 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1013 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1015 return ""
1014 return ""
1016 raise util.Abort(_("censored node: %s") % short(self._filenode),
1015 raise util.Abort(_("censored node: %s") % short(self._filenode),
1017 hint=_("set censor.policy to ignore errors"))
1016 hint=_("set censor.policy to ignore errors"))
1018
1017
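# Editor's sketch (not part of context.py): the censor.policy knob read above
# defaults to "abort"; setting it to "ignore" makes reads of censored filenodes
# return "" instead of raising, e.g. in hgrc:
#     [censor]
#     policy = ignore
# or, programmatically, something like (assuming 'repo' is an open localrepo):
repo.ui.setconfig('censor', 'policy', 'ignore', 'example')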
1019 def size(self):
1018 def size(self):
1020 return self._filelog.size(self._filerev)
1019 return self._filelog.size(self._filerev)
1021
1020
1022 def renamed(self):
1021 def renamed(self):
1023 """check if file was actually renamed in this changeset revision
1022 """check if file was actually renamed in this changeset revision
1024
1023
1025 If a rename is logged in the file revision, we report the copy only
1024 If a rename is logged in the file revision, we report the copy only
1026 if the file revision's linkrev points back to this changeset or both
1025 if the file revision's linkrev points back to this changeset or both
1027 parents of this changeset contain different file revisions.
1026 parents of this changeset contain different file revisions.
1028 """
1027 """
1029
1028
1030 renamed = self._filelog.renamed(self._filenode)
1029 renamed = self._filelog.renamed(self._filenode)
1031 if not renamed:
1030 if not renamed:
1032 return renamed
1031 return renamed
1033
1032
1034 if self.rev() == self.linkrev():
1033 if self.rev() == self.linkrev():
1035 return renamed
1034 return renamed
1036
1035
1037 name = self.path()
1036 name = self.path()
1038 fnode = self._filenode
1037 fnode = self._filenode
1039 for p in self._changectx.parents():
1038 for p in self._changectx.parents():
1040 try:
1039 try:
1041 if fnode == p.filenode(name):
1040 if fnode == p.filenode(name):
1042 return None
1041 return None
1043 except error.LookupError:
1042 except error.LookupError:
1044 pass
1043 pass
1045 return renamed
1044 return renamed
1046
1045
1047 def children(self):
1046 def children(self):
1048 # hard for renames
1047 # hard for renames
1049 c = self._filelog.children(self._filenode)
1048 c = self._filelog.children(self._filenode)
1050 return [filectx(self._repo, self._path, fileid=x,
1049 return [filectx(self._repo, self._path, fileid=x,
1051 filelog=self._filelog) for x in c]
1050 filelog=self._filelog) for x in c]
1052
1051
1053 class committablectx(basectx):
1052 class committablectx(basectx):
1054 """A committablectx object provides common functionality for a context that
1053 """A committablectx object provides common functionality for a context that
1055 wants the ability to commit, e.g. workingctx or memctx."""
1054 wants the ability to commit, e.g. workingctx or memctx."""
1056 def __init__(self, repo, text="", user=None, date=None, extra=None,
1055 def __init__(self, repo, text="", user=None, date=None, extra=None,
1057 changes=None):
1056 changes=None):
1058 self._repo = repo
1057 self._repo = repo
1059 self._rev = None
1058 self._rev = None
1060 self._node = None
1059 self._node = None
1061 self._text = text
1060 self._text = text
1062 if date:
1061 if date:
1063 self._date = util.parsedate(date)
1062 self._date = util.parsedate(date)
1064 if user:
1063 if user:
1065 self._user = user
1064 self._user = user
1066 if changes:
1065 if changes:
1067 self._status = changes
1066 self._status = changes
1068
1067
1069 self._extra = {}
1068 self._extra = {}
1070 if extra:
1069 if extra:
1071 self._extra = extra.copy()
1070 self._extra = extra.copy()
1072 if 'branch' not in self._extra:
1071 if 'branch' not in self._extra:
1073 try:
1072 try:
1074 branch = encoding.fromlocal(self._repo.dirstate.branch())
1073 branch = encoding.fromlocal(self._repo.dirstate.branch())
1075 except UnicodeDecodeError:
1074 except UnicodeDecodeError:
1076 raise util.Abort(_('branch name not in UTF-8!'))
1075 raise util.Abort(_('branch name not in UTF-8!'))
1077 self._extra['branch'] = branch
1076 self._extra['branch'] = branch
1078 if self._extra['branch'] == '':
1077 if self._extra['branch'] == '':
1079 self._extra['branch'] = 'default'
1078 self._extra['branch'] = 'default'
1080
1079
1081 def __str__(self):
1080 def __str__(self):
1082 return str(self._parents[0]) + "+"
1081 return str(self._parents[0]) + "+"
1083
1082
1084 def __nonzero__(self):
1083 def __nonzero__(self):
1085 return True
1084 return True
1086
1085
1087 def _buildflagfunc(self):
1086 def _buildflagfunc(self):
1088 # Create a fallback function for getting file flags when the
1087 # Create a fallback function for getting file flags when the
1089 # filesystem doesn't support them
1088 # filesystem doesn't support them
1090
1089
1091 copiesget = self._repo.dirstate.copies().get
1090 copiesget = self._repo.dirstate.copies().get
1092
1091
1093 if len(self._parents) < 2:
1092 if len(self._parents) < 2:
1094 # when we have one parent, it's easy: copy from parent
1093 # when we have one parent, it's easy: copy from parent
1095 man = self._parents[0].manifest()
1094 man = self._parents[0].manifest()
1096 def func(f):
1095 def func(f):
1097 f = copiesget(f, f)
1096 f = copiesget(f, f)
1098 return man.flags(f)
1097 return man.flags(f)
1099 else:
1098 else:
1100 # merges are tricky: we try to reconstruct the unstored
1099 # merges are tricky: we try to reconstruct the unstored
1101 # result from the merge (issue1802)
1100 # result from the merge (issue1802)
1102 p1, p2 = self._parents
1101 p1, p2 = self._parents
1103 pa = p1.ancestor(p2)
1102 pa = p1.ancestor(p2)
1104 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1103 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1105
1104
1106 def func(f):
1105 def func(f):
1107 f = copiesget(f, f) # may be wrong for merges with copies
1106 f = copiesget(f, f) # may be wrong for merges with copies
1108 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1107 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1109 if fl1 == fl2:
1108 if fl1 == fl2:
1110 return fl1
1109 return fl1
1111 if fl1 == fla:
1110 if fl1 == fla:
1112 return fl2
1111 return fl2
1113 if fl2 == fla:
1112 if fl2 == fla:
1114 return fl1
1113 return fl1
1115 return '' # punt for conflicts
1114 return '' # punt for conflicts
1116
1115
1117 return func
1116 return func
1118
1117
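# Editor's sketch (not part of context.py): the three-way flag resolution used
# by func() above for merges, pulled out as a standalone helper.
def _mergeflags(fl1, fl2, fla):
    if fl1 == fl2:
        return fl1      # both parents agree
    if fl1 == fla:
        return fl2      # only p2 changed the flag relative to the ancestor
    if fl2 == fla:
        return fl1      # only p1 changed the flag
    return ''           # conflicting flag changes: punt, as above

assert _mergeflags('x', 'x', '') == 'x'
assert _mergeflags('', 'l', '') == 'l'
assert _mergeflags('x', 'l', '') == ''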
1119 @propertycache
1118 @propertycache
1120 def _flagfunc(self):
1119 def _flagfunc(self):
1121 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1120 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1122
1121
1123 @propertycache
1122 @propertycache
1124 def _manifest(self):
1123 def _manifest(self):
1125 """generate a manifest corresponding to the values in self._status
1124 """generate a manifest corresponding to the values in self._status
1126
1125
1127 This reuses the file nodeids from the parent, but appends an extra
1126 This reuses the file nodeids from the parent, but appends an extra
1128 letter for changed files: modified files get an extra 'm' while added
1127 letter for changed files: modified files get an extra 'm' while added
1129 files get an extra 'a'. This is used by manifest merge to see that the
1128 files get an extra 'a'. This is used by manifest merge to see that the
1130 files differ and by the update logic to avoid deleting newly added files.
1129 files differ and by the update logic to avoid deleting newly added files.
1131 """
1130 """
1132
1131
1133 man1 = self._parents[0].manifest()
1132 man1 = self._parents[0].manifest()
1134 man = man1.copy()
1133 man = man1.copy()
1135 if len(self._parents) > 1:
1134 if len(self._parents) > 1:
1136 man2 = self.p2().manifest()
1135 man2 = self.p2().manifest()
1137 def getman(f):
1136 def getman(f):
1138 if f in man1:
1137 if f in man1:
1139 return man1
1138 return man1
1140 return man2
1139 return man2
1141 else:
1140 else:
1142 getman = lambda f: man1
1141 getman = lambda f: man1
1143
1142
1144 copied = self._repo.dirstate.copies()
1143 copied = self._repo.dirstate.copies()
1145 ff = self._flagfunc
1144 ff = self._flagfunc
1146 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1145 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1147 for f in l:
1146 for f in l:
1148 orig = copied.get(f, f)
1147 orig = copied.get(f, f)
1149 man[f] = getman(orig).get(orig, nullid) + i
1148 man[f] = getman(orig).get(orig, nullid) + i
1150 try:
1149 try:
1151 man.setflag(f, ff(f))
1150 man.setflag(f, ff(f))
1152 except OSError:
1151 except OSError:
1153 pass
1152 pass
1154
1153
1155 for f in self._status.deleted + self._status.removed:
1154 for f in self._status.deleted + self._status.removed:
1156 if f in man:
1155 if f in man:
1157 del man[f]
1156 del man[f]
1158
1157
1159 return man
1158 return man
1160
1159
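# Editor's sketch (not part of context.py): the suffix scheme described in the
# _manifest docstring above.  Appending 'a' or 'm' to the parent's 20-byte
# nodeid yields an entry that can never equal a real nodeid, which is what the
# merge and update logic relies on.
parentnode = '\x00' * 20            # stand-in for a real 20-byte nodeid
entry = parentnode + 'm'            # how a modified file would be recorded
assert len(entry) == 21 and entry != parentnode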
1161 @propertycache
1160 @propertycache
1162 def _status(self):
1161 def _status(self):
1163 return self._repo.status()
1162 return self._repo.status()
1164
1163
1165 @propertycache
1164 @propertycache
1166 def _user(self):
1165 def _user(self):
1167 return self._repo.ui.username()
1166 return self._repo.ui.username()
1168
1167
1169 @propertycache
1168 @propertycache
1170 def _date(self):
1169 def _date(self):
1171 return util.makedate()
1170 return util.makedate()
1172
1171
1173 def subrev(self, subpath):
1172 def subrev(self, subpath):
1174 return None
1173 return None
1175
1174
1176 def user(self):
1175 def user(self):
1177 return self._user or self._repo.ui.username()
1176 return self._user or self._repo.ui.username()
1178 def date(self):
1177 def date(self):
1179 return self._date
1178 return self._date
1180 def description(self):
1179 def description(self):
1181 return self._text
1180 return self._text
1182 def files(self):
1181 def files(self):
1183 return sorted(self._status.modified + self._status.added +
1182 return sorted(self._status.modified + self._status.added +
1184 self._status.removed)
1183 self._status.removed)
1185
1184
1186 def modified(self):
1185 def modified(self):
1187 return self._status.modified
1186 return self._status.modified
1188 def added(self):
1187 def added(self):
1189 return self._status.added
1188 return self._status.added
1190 def removed(self):
1189 def removed(self):
1191 return self._status.removed
1190 return self._status.removed
1192 def deleted(self):
1191 def deleted(self):
1193 return self._status.deleted
1192 return self._status.deleted
1194 def branch(self):
1193 def branch(self):
1195 return encoding.tolocal(self._extra['branch'])
1194 return encoding.tolocal(self._extra['branch'])
1196 def closesbranch(self):
1195 def closesbranch(self):
1197 return 'close' in self._extra
1196 return 'close' in self._extra
1198 def extra(self):
1197 def extra(self):
1199 return self._extra
1198 return self._extra
1200
1199
1201 def tags(self):
1200 def tags(self):
1202 t = []
1201 t = []
1203 for p in self.parents():
1202 for p in self.parents():
1204 t.extend(p.tags())
1203 t.extend(p.tags())
1205 return t
1204 return t
1206
1205
1207 def bookmarks(self):
1206 def bookmarks(self):
1208 b = []
1207 b = []
1209 for p in self.parents():
1208 for p in self.parents():
1210 b.extend(p.bookmarks())
1209 b.extend(p.bookmarks())
1211 return b
1210 return b
1212
1211
1213 def phase(self):
1212 def phase(self):
1214 phase = phases.draft # default phase to draft
1213 phase = phases.draft # default phase to draft
1215 for p in self.parents():
1214 for p in self.parents():
1216 phase = max(phase, p.phase())
1215 phase = max(phase, p.phase())
1217 return phase
1216 return phase
1218
1217
1219 def hidden(self):
1218 def hidden(self):
1220 return False
1219 return False
1221
1220
1222 def children(self):
1221 def children(self):
1223 return []
1222 return []
1224
1223
1225 def flags(self, path):
1224 def flags(self, path):
1226 if '_manifest' in self.__dict__:
1225 if '_manifest' in self.__dict__:
1227 try:
1226 try:
1228 return self._manifest.flags(path)
1227 return self._manifest.flags(path)
1229 except KeyError:
1228 except KeyError:
1230 return ''
1229 return ''
1231
1230
1232 try:
1231 try:
1233 return self._flagfunc(path)
1232 return self._flagfunc(path)
1234 except OSError:
1233 except OSError:
1235 return ''
1234 return ''
1236
1235
1237 def ancestor(self, c2):
1236 def ancestor(self, c2):
1238 """return the "best" ancestor context of self and c2"""
1237 """return the "best" ancestor context of self and c2"""
1239 return self._parents[0].ancestor(c2) # punt on two parents for now
1238 return self._parents[0].ancestor(c2) # punt on two parents for now
1240
1239
1241 def walk(self, match):
1240 def walk(self, match):
1242 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1241 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1243 True, False))
1242 True, False))
1244
1243
1245 def matches(self, match):
1244 def matches(self, match):
1246 return sorted(self._repo.dirstate.matches(match))
1245 return sorted(self._repo.dirstate.matches(match))
1247
1246
1248 def ancestors(self):
1247 def ancestors(self):
1249 for p in self._parents:
1248 for p in self._parents:
1250 yield p
1249 yield p
1251 for a in self._repo.changelog.ancestors(
1250 for a in self._repo.changelog.ancestors(
1252 [p.rev() for p in self._parents]):
1251 [p.rev() for p in self._parents]):
1253 yield changectx(self._repo, a)
1252 yield changectx(self._repo, a)
1254
1253
1255 def markcommitted(self, node):
1254 def markcommitted(self, node):
1256 """Perform post-commit cleanup necessary after committing this ctx
1255 """Perform post-commit cleanup necessary after committing this ctx
1257
1256
1258 Specifically, this updates the backing stores that this working context
1257 Specifically, this updates the backing stores that this working context
1259 wraps so that they reflect the fact that the changes represented by this
1258 wraps so that they reflect the fact that the changes represented by this
1260 workingctx have been committed. For example, it marks
1259 workingctx have been committed. For example, it marks
1261 modified and added files as normal in the dirstate.
1260 modified and added files as normal in the dirstate.
1262
1261
1263 """
1262 """
1264
1263
1265 self._repo.dirstate.beginparentchange()
1264 self._repo.dirstate.beginparentchange()
1266 for f in self.modified() + self.added():
1265 for f in self.modified() + self.added():
1267 self._repo.dirstate.normal(f)
1266 self._repo.dirstate.normal(f)
1268 for f in self.removed():
1267 for f in self.removed():
1269 self._repo.dirstate.drop(f)
1268 self._repo.dirstate.drop(f)
1270 self._repo.dirstate.setparents(node)
1269 self._repo.dirstate.setparents(node)
1271 self._repo.dirstate.endparentchange()
1270 self._repo.dirstate.endparentchange()
1272
1271
1273 def dirs(self):
1272 def dirs(self):
1274 return self._repo.dirstate.dirs()
1273 return self._repo.dirstate.dirs()
1275
1274
1276 class workingctx(committablectx):
1275 class workingctx(committablectx):
1277 """A workingctx object makes access to data related to
1276 """A workingctx object makes access to data related to
1278 the current working directory convenient.
1277 the current working directory convenient.
1279 date - any valid date string or (unixtime, offset), or None.
1278 date - any valid date string or (unixtime, offset), or None.
1280 user - username string, or None.
1279 user - username string, or None.
1281 extra - a dictionary of extra values, or None.
1280 extra - a dictionary of extra values, or None.
1282 changes - a list of file lists as returned by localrepo.status()
1281 changes - a list of file lists as returned by localrepo.status()
1283 or None to use the repository status.
1282 or None to use the repository status.
1284 """
1283 """
1285 def __init__(self, repo, text="", user=None, date=None, extra=None,
1284 def __init__(self, repo, text="", user=None, date=None, extra=None,
1286 changes=None):
1285 changes=None):
1287 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1286 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1288
1287
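# Editor's sketch (not part of context.py): the usual way a workingctx is
# obtained and queried, assuming 'repo' is an open localrepo.
wctx = repo[None]                      # localrepo hands back a workingctx
if wctx.dirty(missing=True):           # see dirty() below
    print('uncommitted changes on branch %s' % wctx.branch())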
1289 def __iter__(self):
1288 def __iter__(self):
1290 d = self._repo.dirstate
1289 d = self._repo.dirstate
1291 for f in d:
1290 for f in d:
1292 if d[f] != 'r':
1291 if d[f] != 'r':
1293 yield f
1292 yield f
1294
1293
1295 def __contains__(self, key):
1294 def __contains__(self, key):
1296 return self._repo.dirstate[key] not in "?r"
1295 return self._repo.dirstate[key] not in "?r"
1297
1296
1298 @propertycache
1297 @propertycache
1299 def _parents(self):
1298 def _parents(self):
1300 p = self._repo.dirstate.parents()
1299 p = self._repo.dirstate.parents()
1301 if p[1] == nullid:
1300 if p[1] == nullid:
1302 p = p[:-1]
1301 p = p[:-1]
1303 return [changectx(self._repo, x) for x in p]
1302 return [changectx(self._repo, x) for x in p]
1304
1303
1305 def filectx(self, path, filelog=None):
1304 def filectx(self, path, filelog=None):
1306 """get a file context from the working directory"""
1305 """get a file context from the working directory"""
1307 return workingfilectx(self._repo, path, workingctx=self,
1306 return workingfilectx(self._repo, path, workingctx=self,
1308 filelog=filelog)
1307 filelog=filelog)
1309
1308
1310 def dirty(self, missing=False, merge=True, branch=True):
1309 def dirty(self, missing=False, merge=True, branch=True):
1311 "check whether a working directory is modified"
1310 "check whether a working directory is modified"
1312 # check subrepos first
1311 # check subrepos first
1313 for s in sorted(self.substate):
1312 for s in sorted(self.substate):
1314 if self.sub(s).dirty():
1313 if self.sub(s).dirty():
1315 return True
1314 return True
1316 # check current working dir
1315 # check current working dir
1317 return ((merge and self.p2()) or
1316 return ((merge and self.p2()) or
1318 (branch and self.branch() != self.p1().branch()) or
1317 (branch and self.branch() != self.p1().branch()) or
1319 self.modified() or self.added() or self.removed() or
1318 self.modified() or self.added() or self.removed() or
1320 (missing and self.deleted()))
1319 (missing and self.deleted()))
1321
1320
1322 def add(self, list, prefix=""):
1321 def add(self, list, prefix=""):
1323 join = lambda f: os.path.join(prefix, f)
1322 join = lambda f: os.path.join(prefix, f)
1324 wlock = self._repo.wlock()
1323 wlock = self._repo.wlock()
1325 ui, ds = self._repo.ui, self._repo.dirstate
1324 ui, ds = self._repo.ui, self._repo.dirstate
1326 try:
1325 try:
1327 rejected = []
1326 rejected = []
1328 lstat = self._repo.wvfs.lstat
1327 lstat = self._repo.wvfs.lstat
1329 for f in list:
1328 for f in list:
1330 scmutil.checkportable(ui, join(f))
1329 scmutil.checkportable(ui, join(f))
1331 try:
1330 try:
1332 st = lstat(f)
1331 st = lstat(f)
1333 except OSError:
1332 except OSError:
1334 ui.warn(_("%s does not exist!\n") % join(f))
1333 ui.warn(_("%s does not exist!\n") % join(f))
1335 rejected.append(f)
1334 rejected.append(f)
1336 continue
1335 continue
1337 if st.st_size > 10000000:
1336 if st.st_size > 10000000:
1338 ui.warn(_("%s: up to %d MB of RAM may be required "
1337 ui.warn(_("%s: up to %d MB of RAM may be required "
1339 "to manage this file\n"
1338 "to manage this file\n"
1340 "(use 'hg revert %s' to cancel the "
1339 "(use 'hg revert %s' to cancel the "
1341 "pending addition)\n")
1340 "pending addition)\n")
1342 % (f, 3 * st.st_size // 1000000, join(f)))
1341 % (f, 3 * st.st_size // 1000000, join(f)))
1343 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1342 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1344 ui.warn(_("%s not added: only files and symlinks "
1343 ui.warn(_("%s not added: only files and symlinks "
1345 "supported currently\n") % join(f))
1344 "supported currently\n") % join(f))
1346 rejected.append(f)
1345 rejected.append(f)
1347 elif ds[f] in 'amn':
1346 elif ds[f] in 'amn':
1348 ui.warn(_("%s already tracked!\n") % join(f))
1347 ui.warn(_("%s already tracked!\n") % join(f))
1349 elif ds[f] == 'r':
1348 elif ds[f] == 'r':
1350 ds.normallookup(f)
1349 ds.normallookup(f)
1351 else:
1350 else:
1352 ds.add(f)
1351 ds.add(f)
1353 return rejected
1352 return rejected
1354 finally:
1353 finally:
1355 wlock.release()
1354 wlock.release()
1356
1355
1357 def forget(self, files, prefix=""):
1356 def forget(self, files, prefix=""):
1358 join = lambda f: os.path.join(prefix, f)
1357 join = lambda f: os.path.join(prefix, f)
1359 wlock = self._repo.wlock()
1358 wlock = self._repo.wlock()
1360 try:
1359 try:
1361 rejected = []
1360 rejected = []
1362 for f in files:
1361 for f in files:
1363 if f not in self._repo.dirstate:
1362 if f not in self._repo.dirstate:
1364 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1363 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1365 rejected.append(f)
1364 rejected.append(f)
1366 elif self._repo.dirstate[f] != 'a':
1365 elif self._repo.dirstate[f] != 'a':
1367 self._repo.dirstate.remove(f)
1366 self._repo.dirstate.remove(f)
1368 else:
1367 else:
1369 self._repo.dirstate.drop(f)
1368 self._repo.dirstate.drop(f)
1370 return rejected
1369 return rejected
1371 finally:
1370 finally:
1372 wlock.release()
1371 wlock.release()
1373
1372
1374 def undelete(self, list):
1373 def undelete(self, list):
1375 pctxs = self.parents()
1374 pctxs = self.parents()
1376 wlock = self._repo.wlock()
1375 wlock = self._repo.wlock()
1377 try:
1376 try:
1378 for f in list:
1377 for f in list:
1379 if self._repo.dirstate[f] != 'r':
1378 if self._repo.dirstate[f] != 'r':
1380 self._repo.ui.warn(_("%s not removed!\n") % f)
1379 self._repo.ui.warn(_("%s not removed!\n") % f)
1381 else:
1380 else:
1382 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1381 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1383 t = fctx.data()
1382 t = fctx.data()
1384 self._repo.wwrite(f, t, fctx.flags())
1383 self._repo.wwrite(f, t, fctx.flags())
1385 self._repo.dirstate.normal(f)
1384 self._repo.dirstate.normal(f)
1386 finally:
1385 finally:
1387 wlock.release()
1386 wlock.release()
1388
1387
1389 def copy(self, source, dest):
1388 def copy(self, source, dest):
1390 try:
1389 try:
1391 st = self._repo.wvfs.lstat(dest)
1390 st = self._repo.wvfs.lstat(dest)
1392 except OSError, err:
1391 except OSError, err:
1393 if err.errno != errno.ENOENT:
1392 if err.errno != errno.ENOENT:
1394 raise
1393 raise
1395 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1394 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1396 return
1395 return
1397 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1396 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1398 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1397 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1399 "symbolic link\n") % dest)
1398 "symbolic link\n") % dest)
1400 else:
1399 else:
1401 wlock = self._repo.wlock()
1400 wlock = self._repo.wlock()
1402 try:
1401 try:
1403 if self._repo.dirstate[dest] in '?':
1402 if self._repo.dirstate[dest] in '?':
1404 self._repo.dirstate.add(dest)
1403 self._repo.dirstate.add(dest)
1405 elif self._repo.dirstate[dest] in 'r':
1404 elif self._repo.dirstate[dest] in 'r':
1406 self._repo.dirstate.normallookup(dest)
1405 self._repo.dirstate.normallookup(dest)
1407 self._repo.dirstate.copy(source, dest)
1406 self._repo.dirstate.copy(source, dest)
1408 finally:
1407 finally:
1409 wlock.release()
1408 wlock.release()
1410
1409
1411 def _filtersuspectsymlink(self, files):
1410 def _filtersuspectsymlink(self, files):
1412 if not files or self._repo.dirstate._checklink:
1411 if not files or self._repo.dirstate._checklink:
1413 return files
1412 return files
1414
1413
1415 # Symlink placeholders may get non-symlink-like contents
1414 # Symlink placeholders may get non-symlink-like contents
1416 # via user error or dereferencing by NFS or Samba servers,
1415 # via user error or dereferencing by NFS or Samba servers,
1417 # so we filter out any placeholders that don't look like a
1416 # so we filter out any placeholders that don't look like a
1418 # symlink
1417 # symlink
1419 sane = []
1418 sane = []
1420 for f in files:
1419 for f in files:
1421 if self.flags(f) == 'l':
1420 if self.flags(f) == 'l':
1422 d = self[f].data()
1421 d = self[f].data()
1423 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1422 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1424 self._repo.ui.debug('ignoring suspect symlink placeholder'
1423 self._repo.ui.debug('ignoring suspect symlink placeholder'
1425 ' "%s"\n' % f)
1424 ' "%s"\n' % f)
1426 continue
1425 continue
1427 sane.append(f)
1426 sane.append(f)
1428 return sane
1427 return sane
1429
1428
1430 def _checklookup(self, files):
1429 def _checklookup(self, files):
1431 # check for any possibly clean files
1430 # check for any possibly clean files
1432 if not files:
1431 if not files:
1433 return [], []
1432 return [], []
1434
1433
1435 modified = []
1434 modified = []
1436 fixup = []
1435 fixup = []
1437 pctx = self._parents[0]
1436 pctx = self._parents[0]
1438 # do a full compare of any files that might have changed
1437 # do a full compare of any files that might have changed
1439 for f in sorted(files):
1438 for f in sorted(files):
1440 if (f not in pctx or self.flags(f) != pctx.flags(f)
1439 if (f not in pctx or self.flags(f) != pctx.flags(f)
1441 or pctx[f].cmp(self[f])):
1440 or pctx[f].cmp(self[f])):
1442 modified.append(f)
1441 modified.append(f)
1443 else:
1442 else:
1444 fixup.append(f)
1443 fixup.append(f)
1445
1444
1446 # update dirstate for files that are actually clean
1445 # update dirstate for files that are actually clean
1447 if fixup:
1446 if fixup:
1448 try:
1447 try:
1449 # updating the dirstate is optional
1448 # updating the dirstate is optional
1450 # so we don't wait on the lock
1449 # so we don't wait on the lock
1451 # wlock can invalidate the dirstate, so cache normal _after_
1450 # wlock can invalidate the dirstate, so cache normal _after_
1452 # taking the lock
1451 # taking the lock
1453 wlock = self._repo.wlock(False)
1452 wlock = self._repo.wlock(False)
1454 normal = self._repo.dirstate.normal
1453 normal = self._repo.dirstate.normal
1455 try:
1454 try:
1456 for f in fixup:
1455 for f in fixup:
1457 normal(f)
1456 normal(f)
1458 finally:
1457 finally:
1459 wlock.release()
1458 wlock.release()
1460 except error.LockError:
1459 except error.LockError:
1461 pass
1460 pass
1462 return modified, fixup
1461 return modified, fixup
1463
1462
1464 def _manifestmatches(self, match, s):
1463 def _manifestmatches(self, match, s):
1465 """Slow path for workingctx
1464 """Slow path for workingctx
1466
1465
1467 The fast path is comparing the working directory to its parent, so
1466 The fast path is comparing the working directory to its parent, so
1468 this function is only reached when comparing with a non-parent; therefore
1467 this function is only reached when comparing with a non-parent; therefore
1469 we need to build a manifest and return what matches.
1468 we need to build a manifest and return what matches.
1470 """
1469 """
1471 mf = self._repo['.']._manifestmatches(match, s)
1470 mf = self._repo['.']._manifestmatches(match, s)
1472 for f in s.modified + s.added:
1471 for f in s.modified + s.added:
1473 mf[f] = _newnode
1472 mf[f] = _newnode
1474 mf.setflag(f, self.flags(f))
1473 mf.setflag(f, self.flags(f))
1475 for f in s.removed:
1474 for f in s.removed:
1476 if f in mf:
1475 if f in mf:
1477 del mf[f]
1476 del mf[f]
1478 return mf
1477 return mf
1479
1478
1480 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1479 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1481 unknown=False):
1480 unknown=False):
1482 '''Gets the status from the dirstate -- internal use only.'''
1481 '''Gets the status from the dirstate -- internal use only.'''
1483 listignored, listclean, listunknown = ignored, clean, unknown
1482 listignored, listclean, listunknown = ignored, clean, unknown
1484 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1483 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1485 subrepos = []
1484 subrepos = []
1486 if '.hgsub' in self:
1485 if '.hgsub' in self:
1487 subrepos = sorted(self.substate)
1486 subrepos = sorted(self.substate)
1488 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1487 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1489 listclean, listunknown)
1488 listclean, listunknown)
1490
1489
1491 # check for any possibly clean files
1490 # check for any possibly clean files
1492 if cmp:
1491 if cmp:
1493 modified2, fixup = self._checklookup(cmp)
1492 modified2, fixup = self._checklookup(cmp)
1494 s.modified.extend(modified2)
1493 s.modified.extend(modified2)
1495
1494
1496 # update dirstate for files that are actually clean
1495 # update dirstate for files that are actually clean
1497 if fixup and listclean:
1496 if fixup and listclean:
1498 s.clean.extend(fixup)
1497 s.clean.extend(fixup)
1499
1498
1500 return s
1499 return s
1501
1500
1502 def _buildstatus(self, other, s, match, listignored, listclean,
1501 def _buildstatus(self, other, s, match, listignored, listclean,
1503 listunknown):
1502 listunknown):
1504 """build a status with respect to another context
1503 """build a status with respect to another context
1505
1504
1506 This includes logic for maintaining the fast path of status when
1505 This includes logic for maintaining the fast path of status when
1507 comparing the working directory against its parent: a new manifest is
1506 comparing the working directory against its parent: a new manifest is
1508 only built when self (the working directory) is being compared against
1507 only built when self (the working directory) is being compared against
1509 something other than its parent (repo['.']).
1508 something other than its parent (repo['.']).
1510 """
1509 """
1511 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1510 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1512 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1511 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1513 # might have accidentally ended up with the entire contents of the file
1512 # might have accidentally ended up with the entire contents of the file
1514 # they are supposed to be linking to.
1513 # they are supposed to be linking to.
1515 s.modified[:] = self._filtersuspectsymlink(s.modified)
1514 s.modified[:] = self._filtersuspectsymlink(s.modified)
1516 if other != self._repo['.']:
1515 if other != self._repo['.']:
1517 s = super(workingctx, self)._buildstatus(other, s, match,
1516 s = super(workingctx, self)._buildstatus(other, s, match,
1518 listignored, listclean,
1517 listignored, listclean,
1519 listunknown)
1518 listunknown)
1520 elif match.always():
1519 elif match.always():
1521 # cache for performance
1520 # cache for performance
1522 if s.unknown or s.ignored or s.clean:
1521 if s.unknown or s.ignored or s.clean:
1523 # "_status" is cached with list*=False in the normal route
1522 # "_status" is cached with list*=False in the normal route
1524 self._status = scmutil.status(s.modified, s.added, s.removed,
1523 self._status = scmutil.status(s.modified, s.added, s.removed,
1525 s.deleted, [], [], [])
1524 s.deleted, [], [], [])
1526 else:
1525 else:
1527 self._status = s
1526 self._status = s
1528 return s
1527 return s
1529
1528
1530 def _matchstatus(self, other, match):
1529 def _matchstatus(self, other, match):
1531 """override the match method with a filter for directory patterns
1530 """override the match method with a filter for directory patterns
1532
1531
1532 We use inheritance to customize the match.bad method only for
1531 We use inheritance to customize the match.bad method only for
1533 workingctx, since this filter applies only to the working directory when
1532 workingctx, since this filter applies only to the working directory when
1534 comparing against the parent changeset.
1533 comparing against the parent changeset.
1536
1535
1537 If we aren't comparing against the working directory's parent, then we
1536 If we aren't comparing against the working directory's parent, then we
1538 just use the default match object sent to us.
1537 just use the default match object sent to us.
1539 """
1538 """
1540 superself = super(workingctx, self)
1539 superself = super(workingctx, self)
1541 match = superself._matchstatus(other, match)
1540 match = superself._matchstatus(other, match)
1542 if other != self._repo['.']:
1541 if other != self._repo['.']:
1543 def bad(f, msg):
1542 def bad(f, msg):
1544 # 'f' may be a directory pattern from 'match.files()',
1543 # 'f' may be a directory pattern from 'match.files()',
1545 # so 'f not in ctx1' is not enough
1544 # so 'f not in ctx1' is not enough
1546 if f not in other and f not in other.dirs():
1545 if f not in other and f not in other.dirs():
1547 self._repo.ui.warn('%s: %s\n' %
1546 self._repo.ui.warn('%s: %s\n' %
1548 (self._repo.dirstate.pathto(f), msg))
1547 (self._repo.dirstate.pathto(f), msg))
1549 match.bad = bad
1548 match.bad = bad
1550 return match
1549 return match
1551
1550
1552 class committablefilectx(basefilectx):
1551 class committablefilectx(basefilectx):
1553 """A committablefilectx provides common functionality for a file context
1552 """A committablefilectx provides common functionality for a file context
1554 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1553 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1555 def __init__(self, repo, path, filelog=None, ctx=None):
1554 def __init__(self, repo, path, filelog=None, ctx=None):
1556 self._repo = repo
1555 self._repo = repo
1557 self._path = path
1556 self._path = path
1558 self._changeid = None
1557 self._changeid = None
1559 self._filerev = self._filenode = None
1558 self._filerev = self._filenode = None
1560
1559
1561 if filelog is not None:
1560 if filelog is not None:
1562 self._filelog = filelog
1561 self._filelog = filelog
1563 if ctx:
1562 if ctx:
1564 self._changectx = ctx
1563 self._changectx = ctx
1565
1564
1566 def __nonzero__(self):
1565 def __nonzero__(self):
1567 return True
1566 return True
1568
1567
1569 def parents(self):
1568 def parents(self):
1570 '''return parent filectxs, following copies if necessary'''
1569 '''return parent filectxs, following copies if necessary'''
1571 def filenode(ctx, path):
1570 def filenode(ctx, path):
1572 return ctx._manifest.get(path, nullid)
1571 return ctx._manifest.get(path, nullid)
1573
1572
1574 path = self._path
1573 path = self._path
1575 fl = self._filelog
1574 fl = self._filelog
1576 pcl = self._changectx._parents
1575 pcl = self._changectx._parents
1577 renamed = self.renamed()
1576 renamed = self.renamed()
1578
1577
1579 if renamed:
1578 if renamed:
1580 pl = [renamed + (None,)]
1579 pl = [renamed + (None,)]
1581 else:
1580 else:
1582 pl = [(path, filenode(pcl[0], path), fl)]
1581 pl = [(path, filenode(pcl[0], path), fl)]
1583
1582
1584 for pc in pcl[1:]:
1583 for pc in pcl[1:]:
1585 pl.append((path, filenode(pc, path), fl))
1584 pl.append((path, filenode(pc, path), fl))
1586
1585
1587 return [filectx(self._repo, p, fileid=n, filelog=l)
1586 return [filectx(self._repo, p, fileid=n, filelog=l)
1588 for p, n, l in pl if n != nullid]
1587 for p, n, l in pl if n != nullid]
1589
1588
1590 def children(self):
1589 def children(self):
1591 return []
1590 return []
1592
1591
1593 class workingfilectx(committablefilectx):
1592 class workingfilectx(committablefilectx):
1594 """A workingfilectx object makes access to data related to a particular
1593 """A workingfilectx object makes access to data related to a particular
1595 file in the working directory convenient."""
1594 file in the working directory convenient."""
1596 def __init__(self, repo, path, filelog=None, workingctx=None):
1595 def __init__(self, repo, path, filelog=None, workingctx=None):
1597 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1596 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1598
1597
1599 @propertycache
1598 @propertycache
1600 def _changectx(self):
1599 def _changectx(self):
1601 return workingctx(self._repo)
1600 return workingctx(self._repo)
1602
1601
1603 def data(self):
1602 def data(self):
1604 return self._repo.wread(self._path)
1603 return self._repo.wread(self._path)
1605 def renamed(self):
1604 def renamed(self):
1606 rp = self._repo.dirstate.copied(self._path)
1605 rp = self._repo.dirstate.copied(self._path)
1607 if not rp:
1606 if not rp:
1608 return None
1607 return None
1609 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1608 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1610
1609
1611 def size(self):
1610 def size(self):
1612 return self._repo.wvfs.lstat(self._path).st_size
1611 return self._repo.wvfs.lstat(self._path).st_size
1613 def date(self):
1612 def date(self):
1614 t, tz = self._changectx.date()
1613 t, tz = self._changectx.date()
1615 try:
1614 try:
1616 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1615 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1617 except OSError, err:
1616 except OSError, err:
1618 if err.errno != errno.ENOENT:
1617 if err.errno != errno.ENOENT:
1619 raise
1618 raise
1620 return (t, tz)
1619 return (t, tz)
1621
1620
1622 def cmp(self, fctx):
1621 def cmp(self, fctx):
1623 """compare with other file context
1622 """compare with other file context
1624
1623
1625 returns True if different from fctx.
1624 returns True if different from fctx.
1626 """
1625 """
1627 # fctx should be a filectx (not a workingfilectx)
1626 # fctx should be a filectx (not a workingfilectx)
1628 # invert comparison to reuse the same code path
1627 # invert comparison to reuse the same code path
1629 return fctx.cmp(self)
1628 return fctx.cmp(self)
1630
1629
1631 def remove(self, ignoremissing=False):
1630 def remove(self, ignoremissing=False):
1632 """wraps unlink for a repo's working directory"""
1631 """wraps unlink for a repo's working directory"""
1633 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1632 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1634
1633
1635 def write(self, data, flags):
1634 def write(self, data, flags):
1636 """wraps repo.wwrite"""
1635 """wraps repo.wwrite"""
1637 self._repo.wwrite(self._path, data, flags)
1636 self._repo.wwrite(self._path, data, flags)
1638
1637
1639 class workingcommitctx(workingctx):
1638 class workingcommitctx(workingctx):
1640 """A workingcommitctx object makes access to data related to
1639 """A workingcommitctx object makes access to data related to
1641 the revision being committed convenient.
1640 the revision being committed convenient.
1642
1641
1643 This hides changes in the working directory that aren't being
1642 This hides changes in the working directory that aren't being
1644 committed in this context.
1643 committed in this context.
1645 """
1644 """
1646 def __init__(self, repo, changes,
1645 def __init__(self, repo, changes,
1647 text="", user=None, date=None, extra=None):
1646 text="", user=None, date=None, extra=None):
1648 super(workingctx, self).__init__(repo, text, user, date, extra,
1647 super(workingctx, self).__init__(repo, text, user, date, extra,
1649 changes)
1648 changes)
1650
1649
1651 def _buildstatus(self, other, s, match,
1650 def _buildstatus(self, other, s, match,
1652 listignored, listclean, listunknown):
1651 listignored, listclean, listunknown):
1653 """Prevent ``workingctx._buildstatus`` from changing ``self._status``
1652 """Prevent ``workingctx._buildstatus`` from changing ``self._status``
1654 """
1653 """
1655 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1654 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1656 if other != self._repo['.']:
1655 if other != self._repo['.']:
1657 # workingctx._buildstatus doesn't change self._status in this case
1656 # workingctx._buildstatus doesn't change self._status in this case
1658 superself = super(workingcommitctx, self)
1657 superself = super(workingcommitctx, self)
1659 s = superself._buildstatus(other, s, match,
1658 s = superself._buildstatus(other, s, match,
1660 listignored, listclean, listunknown)
1659 listignored, listclean, listunknown)
1661 return s
1660 return s
1662
1661
1663 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1662 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1664 unknown=False):
1663 unknown=False):
1665 """Return matched files only in ``self._status``
1664 """Return matched files only in ``self._status``
1666
1665
1667 Uncommitted files appear "clean" via this context, even if
1666 Uncommitted files appear "clean" via this context, even if
1668 they aren't actually so in the working directory.
1667 they aren't actually so in the working directory.
1669 """
1668 """
1670 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1669 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1671 if clean:
1670 if clean:
1672 clean = [f for f in self._manifest if f not in self._changedset]
1671 clean = [f for f in self._manifest if f not in self._changedset]
1673 else:
1672 else:
1674 clean = []
1673 clean = []
1675 return scmutil.status([f for f in self._status.modified if match(f)],
1674 return scmutil.status([f for f in self._status.modified if match(f)],
1676 [f for f in self._status.added if match(f)],
1675 [f for f in self._status.added if match(f)],
1677 [f for f in self._status.removed if match(f)],
1676 [f for f in self._status.removed if match(f)],
1678 [], [], [], clean)
1677 [], [], [], clean)
1679
1678
1680 @propertycache
1679 @propertycache
1681 def _changedset(self):
1680 def _changedset(self):
1682 """Return the set of files changed in this context
1681 """Return the set of files changed in this context
1683 """
1682 """
1684 changed = set(self._status.modified)
1683 changed = set(self._status.modified)
1685 changed.update(self._status.added)
1684 changed.update(self._status.added)
1686 changed.update(self._status.removed)
1685 changed.update(self._status.removed)
1687 return changed
1686 return changed
1688
1687
1689 class memctx(committablectx):
1688 class memctx(committablectx):
1690 """Use memctx to perform in-memory commits via localrepo.commitctx().
1689 """Use memctx to perform in-memory commits via localrepo.commitctx().
1691
1690
1692 Revision information is supplied at initialization time, while the
1691 Revision information is supplied at initialization time, while the
1693 related file data is made available through a callback
1692 related file data is made available through a callback
1694 mechanism. 'repo' is the current localrepo, 'parents' is a
1693 mechanism. 'repo' is the current localrepo, 'parents' is a
1695 sequence of two parent revisions identifiers (pass None for every
1694 sequence of two parent revisions identifiers (pass None for every
1696 missing parent), 'text' is the commit message and 'files' lists
1695 missing parent), 'text' is the commit message and 'files' lists
1697 names of files touched by the revision (normalized and relative to
1696 names of files touched by the revision (normalized and relative to
1698 repository root).
1697 repository root).
1699
1698
1700 filectxfn(repo, memctx, path) is a callable receiving the
1699 filectxfn(repo, memctx, path) is a callable receiving the
1701 repository, the current memctx object and the normalized path of
1700 repository, the current memctx object and the normalized path of
1702 requested file, relative to repository root. It is fired by the
1701 requested file, relative to repository root. It is fired by the
1703 commit function for every file in 'files', but the call order is
1702 commit function for every file in 'files', but the call order is
1704 undefined. If the file is available in the revision being
1703 undefined. If the file is available in the revision being
1705 committed (updated or added), filectxfn returns a memfilectx
1704 committed (updated or added), filectxfn returns a memfilectx
1706 object. If the file was removed, filectxfn raises an
1705 object. If the file was removed, filectxfn raises an
1707 IOError. Moved files are represented by marking the source file
1706 IOError. Moved files are represented by marking the source file
1708 removed and the new file added with copy information (see
1707 removed and the new file added with copy information (see
1709 memfilectx).
1708 memfilectx).
1710
1709
1711 user receives the committer name and defaults to current
1710 user receives the committer name and defaults to current
1712 repository username, date is the commit date in any format
1711 repository username, date is the commit date in any format
1713 supported by util.parsedate() and defaults to current date, extra
1712 supported by util.parsedate() and defaults to current date, extra
1714 is a dictionary of metadata or is left empty.
1713 is a dictionary of metadata or is left empty.
1715 """
1714 """
1716
1715
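# Editor's sketch (not part of context.py): a minimal in-memory commit built
# with memctx and memfilectx as described above, assuming 'repo' is an open
# localrepo and that we want to add a single file on top of tip.
def getfilectx(repo, memctx, path):
    # called back for every path in 'files'; here every file gets fixed contents
    return memfilectx(repo, path, 'hello from memctx\n', memctx=memctx)

mctx = memctx(repo, (repo['tip'].node(), None), 'example in-memory commit',
              ['example.txt'], getfilectx, user='editor <editor@example.com>')
newnode = mctx.commit()               # delegates to localrepo.commitctx()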
1717 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1716 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1718 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1717 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1719 # this field to determine what to do in filectxfn.
1718 # this field to determine what to do in filectxfn.
1720 _returnnoneformissingfiles = True
1719 _returnnoneformissingfiles = True
1721
1720
1722 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1721 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1723 date=None, extra=None, editor=False):
1722 date=None, extra=None, editor=False):
1724 super(memctx, self).__init__(repo, text, user, date, extra)
1723 super(memctx, self).__init__(repo, text, user, date, extra)
1725 self._rev = None
1724 self._rev = None
1726 self._node = None
1725 self._node = None
1727 parents = [(p or nullid) for p in parents]
1726 parents = [(p or nullid) for p in parents]
1728 p1, p2 = parents
1727 p1, p2 = parents
1729 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1728 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1730 files = sorted(set(files))
1729 files = sorted(set(files))
1731 self._files = files
1730 self._files = files
1732 self.substate = {}
1731 self.substate = {}
1733
1732
1734 # if store is not callable, wrap it in a function
1733 # if store is not callable, wrap it in a function
1735 if not callable(filectxfn):
1734 if not callable(filectxfn):
1736 def getfilectx(repo, memctx, path):
1735 def getfilectx(repo, memctx, path):
1737 fctx = filectxfn[path]
1736 fctx = filectxfn[path]
1738 # this is weird but apparently we only keep track of one parent
1737 # this is weird but apparently we only keep track of one parent
1739 # (why not only store that instead of a tuple?)
1738 # (why not only store that instead of a tuple?)
1740 copied = fctx.renamed()
1739 copied = fctx.renamed()
1741 if copied:
1740 if copied:
1742 copied = copied[0]
1741 copied = copied[0]
1743 return memfilectx(repo, path, fctx.data(),
1742 return memfilectx(repo, path, fctx.data(),
1744 islink=fctx.islink(), isexec=fctx.isexec(),
1743 islink=fctx.islink(), isexec=fctx.isexec(),
1745 copied=copied, memctx=memctx)
1744 copied=copied, memctx=memctx)
1746 self._filectxfn = getfilectx
1745 self._filectxfn = getfilectx
1747 else:
1746 else:
1748 # "util.cachefunc" reduces invocation of possibly expensive
1747 # "util.cachefunc" reduces invocation of possibly expensive
1749 # "filectxfn" for performance (e.g. converting from another VCS)
1748 # "filectxfn" for performance (e.g. converting from another VCS)
1750 self._filectxfn = util.cachefunc(filectxfn)
1749 self._filectxfn = util.cachefunc(filectxfn)
1751
1750
1752 self._extra = extra and extra.copy() or {}
1751 self._extra = extra and extra.copy() or {}
1753 if self._extra.get('branch', '') == '':
1752 if self._extra.get('branch', '') == '':
1754 self._extra['branch'] = 'default'
1753 self._extra['branch'] = 'default'
1755
1754
1756 if editor:
1755 if editor:
1757 self._text = editor(self._repo, self, [])
1756 self._text = editor(self._repo, self, [])
1758 self._repo.savecommitmessage(self._text)
1757 self._repo.savecommitmessage(self._text)
1759
1758
1760 def filectx(self, path, filelog=None):
1759 def filectx(self, path, filelog=None):
1761 """get a file context from the working directory
1760 """get a file context from the working directory
1762
1761
1763 Returns None if the file doesn't exist and should be removed.
1762 Returns None if the file doesn't exist and should be removed.
1764 return self._filectxfn(self._repo, self, path)
1763 return self._filectxfn(self._repo, self, path)
1765
1764
1766 def commit(self):
1765 def commit(self):
1767 """commit context to the repo"""
1766 """commit context to the repo"""
1768 return self._repo.commitctx(self)
1767 return self._repo.commitctx(self)
1769
1768
1770 @propertycache
1769 @propertycache
1771 def _manifest(self):
1770 def _manifest(self):
1772 """generate a manifest based on the return values of filectxfn"""
1771 """generate a manifest based on the return values of filectxfn"""
1773
1772
1774 # keep this simple for now; just worry about p1
1773 # keep this simple for now; just worry about p1
1775 pctx = self._parents[0]
1774 pctx = self._parents[0]
1776 man = pctx.manifest().copy()
1775 man = pctx.manifest().copy()
1777
1776
1778 for f in self._status.modified:
1777 for f in self._status.modified:
1779 p1node = nullid
1778 p1node = nullid
1780 p2node = nullid
1779 p2node = nullid
1781 p = pctx[f].parents() # if file isn't in pctx, check p2?
1780 p = pctx[f].parents() # if file isn't in pctx, check p2?
1782 if len(p) > 0:
1781 if len(p) > 0:
1783 p1node = p[0].node()
1782 p1node = p[0].node()
1784 if len(p) > 1:
1783 if len(p) > 1:
1785 p2node = p[1].node()
1784 p2node = p[1].node()
1786 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1785 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1787
1786
1788 for f in self._status.added:
1787 for f in self._status.added:
1789 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1788 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1790
1789
1791 for f in self._status.removed:
1790 for f in self._status.removed:
1792 if f in man:
1791 if f in man:
1793 del man[f]
1792 del man[f]
1794
1793
1795 return man
1794 return man
1796
1795
1797 @propertycache
1796 @propertycache
1798 def _status(self):
1797 def _status(self):
1799 """Calculate exact status from ``files`` specified at construction
1798 """Calculate exact status from ``files`` specified at construction
1800 """
1799 """
1801 man1 = self.p1().manifest()
1800 man1 = self.p1().manifest()
1802 p2 = self._parents[1]
1801 p2 = self._parents[1]
1803 # "1 < len(self._parents)" can't be used for checking
1802 # "1 < len(self._parents)" can't be used for checking
1804 # existence of the 2nd parent, because "memctx._parents" is
1803 # existence of the 2nd parent, because "memctx._parents" is
1805 # explicitly initialized with a list whose length is always 2.
1804 # explicitly initialized with a list whose length is always 2.
1806 if p2.node() != nullid:
1805 if p2.node() != nullid:
1807 man2 = p2.manifest()
1806 man2 = p2.manifest()
1808 managing = lambda f: f in man1 or f in man2
1807 managing = lambda f: f in man1 or f in man2
1809 else:
1808 else:
1810 managing = lambda f: f in man1
1809 managing = lambda f: f in man1
1811
1810
1812 modified, added, removed = [], [], []
1811 modified, added, removed = [], [], []
1813 for f in self._files:
1812 for f in self._files:
1814 if not managing(f):
1813 if not managing(f):
1815 added.append(f)
1814 added.append(f)
1816 elif self[f]:
1815 elif self[f]:
1817 modified.append(f)
1816 modified.append(f)
1818 else:
1817 else:
1819 removed.append(f)
1818 removed.append(f)
1820
1819
1821 return scmutil.status(modified, added, removed, [], [], [], [])
1820 return scmutil.status(modified, added, removed, [], [], [], [])
1822
1821
1823 class memfilectx(committablefilectx):
1822 class memfilectx(committablefilectx):
1824 """memfilectx represents an in-memory file to commit.
1823 """memfilectx represents an in-memory file to commit.
1825
1824
1826 See memctx and committablefilectx for more details.
1825 See memctx and committablefilectx for more details.
1827 """
1826 """
1828 def __init__(self, repo, path, data, islink=False,
1827 def __init__(self, repo, path, data, islink=False,
1829 isexec=False, copied=None, memctx=None):
1828 isexec=False, copied=None, memctx=None):
1830 """
1829 """
1831 path is the normalized file path relative to repository root.
1830 path is the normalized file path relative to repository root.
1832 data is the file content as a string.
1831 data is the file content as a string.
1833 islink is True if the file is a symbolic link.
1832 islink is True if the file is a symbolic link.
1834 isexec is True if the file is executable.
1833 isexec is True if the file is executable.
1835 copied is the source file path if current file was copied in the
1834 copied is the source file path if current file was copied in the
1836 revision being committed, or None."""
1835 revision being committed, or None."""
1837 super(memfilectx, self).__init__(repo, path, None, memctx)
1836 super(memfilectx, self).__init__(repo, path, None, memctx)
1838 self._data = data
1837 self._data = data
1839 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1838 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1840 self._copied = None
1839 self._copied = None
1841 if copied:
1840 if copied:
1842 self._copied = (copied, nullid)
1841 self._copied = (copied, nullid)
1843
1842
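# Editor's sketch (not part of context.py): a memfilectx recording a rename, to
# go with the memctx sketch above; 'repo' and 'memctx_obj' are assumed to exist.
renamed_fctx = memfilectx(repo, 'new/name.txt', 'contents\n',
                          copied='old/name.txt', memctx=memctx_obj)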
1844 def data(self):
1843 def data(self):
1845 return self._data
1844 return self._data
1846 def size(self):
1845 def size(self):
1847 return len(self.data())
1846 return len(self.data())
1848 def flags(self):
1847 def flags(self):
1849 return self._flags
1848 return self._flags
1850 def renamed(self):
1849 def renamed(self):
1851 return self._copied
1850 return self._copied
1852
1851
1853 def remove(self, ignoremissing=False):
1852 def remove(self, ignoremissing=False):
1854 """wraps unlink for a repo's working directory"""
1853 """wraps unlink for a repo's working directory"""
1855 # need to figure out what to do here
1854 # need to figure out what to do here
1856 del self._changectx[self._path]
1855 del self._changectx[self._path]
1857
1856
1858 def write(self, data, flags):
1857 def write(self, data, flags):
1859 """wraps repo.wwrite"""
1858 """wraps repo.wwrite"""
1860 self._data = data
1859 self._data = data