filectx: use _descendantrev in parents()...
Matt Mackall - r23984:2896f535 stable
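What changes, condensed from the hunk below: basefilectx.parents() stops computing an adjusted linkrev eagerly for each parent and instead records the known descendant revision on the child filectx, so the adjustment can happen lazily in the _changeid property when (and if) it is needed. A simplified before/after sketch, taken from the diff itself (surrounding loop omitted):

    # before: adjust the linkrev up front while building each parent filectx
    rev = self._adjustlinkrev(path, l, fnode, self.rev())
    fctx = filectx(self._repo, path, fileid=fnode, filelog=l, changeid=rev)

    # after: just remember the descendant; _adjustlinkrev runs later if needed
    fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
    fctx._descendantrev = self.rev()

A second branch added in the hunk propagates an existing _descendantrev (and _ancestrycontext) from the child filectx to its parents when the child itself was built that way.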
@@ -1,1862 +1,1866
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import nullid, nullrev, short, hex, bin
from i18n import _
import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
import match as matchmod
import os, errno, stat
import obsolete as obsmod
import repoview
import fileset
import revlog

propertycache = util.propertycache

# Phony node value to stand-in for new files in some uses of
# manifests. Manifests support 21-byte hashes for nodes which are
# dirty in the working copy.
_newnode = '!' * 21

class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        for f in sorted(self._manifest):
            yield f

    def _manifestmatches(self, match, s):
        """generate a new manifest filtered by the match argument

        This method is for internal use only and mainly exists to provide an
        object oriented way for other contexts to customize the manifest
        generation.
        """
        return self.manifest().matches(match)

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        if self.rev() is not None and self.rev() < other.rev():
            self.manifest()
        mf1 = other._manifestmatches(match, s)
        mf2 = self._manifestmatches(match, s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif node2 != _newnode:
                # The file was not a new file in mf2, so an entry
                # from diff is really a difference.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                # node2 was newnode, but the working file doesn't
                # match the one in mf1.
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1]
            ignored = [fn for fn in ignored if fn not in mf1]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but its ancestors are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successor of a changeset with multiple possible successors

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changeset.

        Troubles are returned as strings. Possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        if len(self._parents) == 2:
            return self._parents[1]
        return changectx(self._repo, -1)

    def _fileinfo(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        node, flag = self._repo.manifest.find(self._changeset[0], path)
        if not node:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path):
        return subrepo.subrepo(self, path)

    def match(self, pats=[], include=None, exclude=None, default='glob'):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.auditor, ctx=self)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    @propertycache
    def _dirs(self):
        return scmutil.dirs(self._manifest)

    def dirs(self):
        return self._dirs

    def dirty(self, missing=False, merge=True, branch=True):
        return False

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                rev2 = ctx2.subrev(subpath)
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self._repo.ui.status(_("skipping missing "
                                           "subrepository: %s\n") % subpath)

        for l in r:
            l.sort()

        return r


def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None):
    def getfilectx(repo, memctx, path):
        data, mode, copied = store.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)
    extra = {}
    if branch:
        extra['branch'] = encoding.fromlocal(branch)
    ctx = memctx(repo, parents, text, files, getfilectx, user,
                 date, extra, editor)
    return ctx

class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == '.':
                self._node = repo.dirstate.p1()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                r = int(changeid)
                if str(r) != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                if len(changeid) == 20:
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            if repo.filtername == 'visible':
                msg = _("hidden revision '%s'") % changeid
                hint = _('use --hidden to access hidden revisions')
                raise error.FilteredRepoLookupError(msg, hint=hint)
            msg = _("filtered revision '%s' (not in '%s' subset)")
            msg %= (changeid, repo.filtername)
            raise error.FilteredRepoLookupError(msg)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    @propertycache
    def _changeset(self):
        return self._repo.changelog.read(self.rev())

    @propertycache
    def _manifest(self):
        return self._repo.manifest.read(self._changeset[0])

    @propertycache
    def _manifestdelta(self):
        return self._repo.manifest.readdelta(self._changeset[0])

    @propertycache
    def _parents(self):
        p = self._repo.changelog.parentrevs(self._rev)
        if p[1] == nullrev:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def changeset(self):
        return self._changeset
    def manifestnode(self):
        return self._changeset[0]

    def user(self):
        return self._changeset[1]
    def date(self):
        return self._changeset[2]
    def files(self):
        return self._changeset[3]
    def description(self):
        return self._changeset[4]
    def branch(self):
        return encoding.tolocal(self._changeset[5].get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset[5]
    def extra(self):
        return self._changeset[5]
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        fset = set(match.files())
        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        # avoid the entire walk if we're only looking for specific files
        if fset and not match.anypats():
            if util.all([fn in self for fn in fset]):
                for fn in sorted(fset):
                    if match(fn):
                        yield fn
                raise StopIteration

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn
        for fn in sorted(fset):
            if fn in self._dirs:
                # specified pattern is a directory
                continue
            match.bad(fn, _('no such file in rev %s') % self)

    def matches(self, match):
        return self.walk(match)

class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory."""
    def __new__(cls, repo, path, *args, **kwargs):
        return super(basefilectx, cls).__new__(cls)

    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if '_changeid' in self.__dict__:
            return self._changeid
        elif '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._path, self._filelog,
                                       self._filenode, self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    def __str__(self):
        return "%s@%s" % (self.path(), self._changectx)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    def flags(self):
        return self._changectx.flags(self._path)
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return util.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if (fctx._filerev is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

    def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :repo: a localrepository object (used to access changelog and manifest)
        :path: the file path
        :fnode: the nodeid of the file revision
        :filelog: the filelog of this path
        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        ma = repo.manifest
        # fetch the linkrev
        fr = filelog.rev(fnode)
        lkr = filelog.linkrev(fr)
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if memberanc is None:
            memberanc = iteranc = cl.ancestors([srcrev], lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors([srcrev], lkr, inclusive=inclusive)
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == ma.readfast(ac[0]).get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if the manifest uses a buggy file revision (not a child of
            # the one it replaces) we could. Such a buggy situation will
            # likely result in a crash somewhere else at some point.
        return lkr

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account
        the changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self._path, self._filelog, self._filenode,
                                   self.rev(), inclusive=True)

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid and pl is
            #   empty.
            # - In case of merge, only one of the parents is nullid and it
            #   should be replaced with the rename information. This parent
            #   is -always- the first one.
            #
            # As nullid has always been filtered out in the previous list
            # comprehension, inserting at 0 will always result in replacing
            # the first nullid parent with the rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        ret = []
        for path, fnode, l in pl:
            if '_changeid' in vars(self) or '_changectx' in vars(self):
                # If self is associated with a changeset (probably explicitly
                # fed), ensure the created filectx is associated with a
                # changeset that is an ancestor of self.changectx.
-               rev = self._adjustlinkrev(path, l, fnode, self.rev())
-               fctx = filectx(self._repo, path, fileid=fnode, filelog=l,
-                              changeid=rev)
+               # This lets us later use _adjustlinkrev to get a correct link.
+               fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
+               fctx._descendantrev = self.rev()
                fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
-
+           elif '_descendantrev' in vars(self):
+               # Otherwise propagate _descendantrev if we have one associated.
+               fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
+               fctx._descendantrev = self._descendantrev
+               fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
            else:
                fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
            ret.append(fctx)
        return ret

848 def p1(self):
852 def p1(self):
849 return self.parents()[0]
853 return self.parents()[0]
850
854
851 def p2(self):
855 def p2(self):
852 p = self.parents()
856 p = self.parents()
853 if len(p) == 2:
857 if len(p) == 2:
854 return p[1]
858 return p[1]
855 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
859 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
856
860
857 def annotate(self, follow=False, linenumber=None, diffopts=None):
    def annotate(self, follow=False, linenumber=None, diffopts=None):
        '''returns a list of tuples of (ctx, line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed.
        If the "linenumber" parameter is not None, tuples of
        ((ctx, linenumber), line) are returned instead, where linenumber
        is the line's number at its first appearance in the managed file.
        To reduce annotation cost, a fixed value (False) is used as the
        linenumber if the "linenumber" parameter is False.'''

        if linenumber is None:
            def decorate(text, rev):
                return ([rev] * len(text.splitlines()), text)
        elif linenumber:
            def decorate(text, rev):
                size = len(text.splitlines())
                return ([(rev, i) for i in xrange(1, size + 1)], text)
        else:
            def decorate(text, rev):
                return ([(rev, False)] * len(text.splitlines()), text)

        def pair(parent, child):
            blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
                                     refine=True)
            for (a1, a2, b1, b2), t in blocks:
                # Changed blocks ('!') or blocks made only of blank lines ('~')
                # belong to the child.
                if t == '=':
                    child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = self.filectx(self.filenode(), changeid=introrev)

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        visit = [base]
        hist = {}
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit[-1]
            pcached = f in pcache
            if not pcached:
                pcache[f] = parents(f)

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
                if not pcached:
                    needed[p] = needed.get(p, 0) + 1
            if ready:
                visit.pop()
                reusable = f in hist
                if reusable:
                    curr = hist[f]
                else:
                    curr = decorate(f.data(), f)
                for p in pl:
                    if not reusable:
                        curr = pair(hist[p], curr)
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                pcache[f] = []

        return zip(hist[base][0], hist[base][1].splitlines(True))

    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        cut = followfirst and 1 or None
        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build a `changectx` for a filtered revision. In that case we
            # fall back to creating the `changectx` on the unfiltered version
            # of the repository.
            # This fallback should not be an issue because `changectx` objects
            # obtained from a `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either, and "incorrect
            # behavior" is seen as better than a crash.
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when fixing the linkrev issues is on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise util.Abort(_("censored node: %s") % short(self._filenode),
                             hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report the copy for
        the changeset only if the file revision's linkrev points back to the
        changeset in question or if both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

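# Illustrative sketch (not part of the original module): how a caller might
# open a file revision through filectx and run annotate() on it. The 'repo'
# argument and the 'foo.txt'/'tip' values are hypothetical. Per the annotate()
# docstring above, each item is ((ctx, lineno), line) when linenumber=True.
def _example_annotate(repo):
    fctx = filectx(repo, 'foo.txt', changeid='tip')
    for (ctx, lineno), line in fctx.annotate(follow=True, linenumber=True):
        # ctx is the filectx that last changed the line; lineno is the line's
        # number at its first appearance in the file; line keeps its newline.
        repo.ui.write('%s:%d: %s' % (ctx, lineno, line))
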
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get

        if len(self._parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = self._parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = self._parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeid from the parent, but appends an extra
        letter when the file is modified. Modified files get an extra 'm'
        while added files get an extra 'a'. This is used by manifests merge
        to see that files are different and by update logic to avoid deleting
        newly added files.
        """

        man1 = self._parents[0].manifest()
        man = man1.copy()
        if len(self._parents) > 1:
            man2 = self.p2().manifest()
            def getman(f):
                if f in man1:
                    return man1
                return man2
        else:
            getman = lambda f: man1

        copied = self._repo.dirstate.copies()
        ff = self._flagfunc
        for i, l in (("a", self._status.added), ("m", self._status.modified)):
            for f in l:
                orig = copied.get(f, f)
                man[f] = getman(orig).get(orig, nullid) + i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in self._status.deleted + self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    def subrev(self, subpath):
        return None

    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        t = []
        for p in self.parents():
            t.extend(p.tags())
        return t

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        self._repo.dirstate.beginparentchange()
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)
        self._repo.dirstate.endparentchange()

    def dirs(self):
        return self._repo.dirstate.dirs()

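# Illustrative sketch (not part of the original module): the flag-resolution
# rule used by _buildflagfunc() for merges, pulled out as a standalone helper
# so it can be exercised directly. 'fl1'/'fl2' are the parents' flags and
# 'fla' is the ancestor's flag; the helper name and signature are ours.
def _example_mergeflags(fl1, fl2, fla):
    if fl1 == fl2:
        return fl1      # parents agree, keep their flag
    if fl1 == fla:
        return fl2      # only p2 changed the flag, keep its change
    if fl2 == fla:
        return fl1      # only p1 changed the flag, keep its change
    return ''           # both changed it differently: punt for conflicts

# e.g. _example_mergeflags('x', '', '') == 'x' (p1 made the file executable)
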
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
               or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty():
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        ui, ds = self._repo.ui, self._repo.dirstate
        try:
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, files, prefix=""):
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        try:
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected
        finally:
            wlock.release()

    def undelete(self, list):
        pctxs = self.parents()
        wlock = self._repo.wlock()
        try:
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % f)
                else:
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n") % dest)
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n") % dest)
        else:
            wlock = self._repo.wlock()
            try:
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], []

        modified = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)

        # update dirstate for files that are actually clean
        if fixup:
            try:
                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                wlock = self._repo.wlock(False)
                normal = self._repo.dirstate.normal
                try:
                    for f in fixup:
                        normal(f)
                finally:
                    wlock.release()
            except error.LockError:
                pass
        return modified, fixup

    def _manifestmatches(self, match, s):
        """Slow path for workingctx

        The fast path is when we compare the working directory to its parent
        which means this function is comparing with a non-parent; therefore we
        need to build a manifest and return what matches.
        """
        mf = self._repo['.']._manifestmatches(match, s)
        for f in s.modified + s.added:
            mf[f] = _newnode
            mf.setflag(f, self.flags(f))
        for f in s.removed:
            if f in mf:
                del mf[f]
        return mf

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        if cmp:
            modified2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)

            # update dirstate for files that are actually clean
            if fixup and listclean:
                s.clean.extend(fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, match)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and f not in other.dirs():
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

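# Illustrative sketch (not part of the original module): a caller-side view of
# workingctx. 'repo' is a hypothetical localrepo and the path is made up;
# dirty() and add() are the methods defined above.
def _example_workingdir(repo):
    wctx = workingctx(repo)
    if wctx.dirty(missing=True):
        repo.ui.write('working directory has uncommitted changes\n')
    # add() returns the subset of paths it refused to track
    rejected = wctx.add(['newfile.txt'])
    return rejected
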
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []

class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)

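# Illustrative sketch (not part of the original module): reading a file from
# the working directory through workingfilectx. 'repo' and the path are
# hypothetical; filectx() on a workingctx (defined above) returns a
# workingfilectx whose data(), size() and date() come from the filesystem.
def _example_readworkingfile(repo):
    wctx = workingctx(repo)
    wfctx = wctx.filectx('foo.txt')
    return wfctx.data(), wfctx.size(), wfctx.date()
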
1653 class workingcommitctx(workingctx):
1657 class workingcommitctx(workingctx):
1654 """A workingcommitctx object makes access to data related to
1658 """A workingcommitctx object makes access to data related to
1655 the revision being committed convenient.
1659 the revision being committed convenient.
1656
1660
1657 This hides changes in the working directory, if they aren't
1661 This hides changes in the working directory, if they aren't
1658 committed in this context.
1662 committed in this context.
1659 """
1663 """
1660 def __init__(self, repo, changes,
1664 def __init__(self, repo, changes,
1661 text="", user=None, date=None, extra=None):
1665 text="", user=None, date=None, extra=None):
1662 super(workingctx, self).__init__(repo, text, user, date, extra,
1666 super(workingctx, self).__init__(repo, text, user, date, extra,
1663 changes)
1667 changes)
1664
1668
1665 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1669 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1666 unknown=False):
1670 unknown=False):
1667 """Return matched files only in ``self._status``
1671 """Return matched files only in ``self._status``
1668
1672
1669 Uncommitted files appear "clean" via this context, even if
1673 Uncommitted files appear "clean" via this context, even if
1670 they aren't actually so in the working directory.
1674 they aren't actually so in the working directory.
1671 """
1675 """
1672 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1676 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1673 if clean:
1677 if clean:
1674 clean = [f for f in self._manifest if f not in self._changedset]
1678 clean = [f for f in self._manifest if f not in self._changedset]
1675 else:
1679 else:
1676 clean = []
1680 clean = []
1677 return scmutil.status([f for f in self._status.modified if match(f)],
1681 return scmutil.status([f for f in self._status.modified if match(f)],
1678 [f for f in self._status.added if match(f)],
1682 [f for f in self._status.added if match(f)],
1679 [f for f in self._status.removed if match(f)],
1683 [f for f in self._status.removed if match(f)],
1680 [], [], [], clean)
1684 [], [], [], clean)
1681
1685
1682 @propertycache
1686 @propertycache
1683 def _changedset(self):
1687 def _changedset(self):
1684 """Return the set of files changed in this context
1688 """Return the set of files changed in this context
1685 """
1689 """
1686 changed = set(self._status.modified)
1690 changed = set(self._status.modified)
1687 changed.update(self._status.added)
1691 changed.update(self._status.added)
1688 changed.update(self._status.removed)
1692 changed.update(self._status.removed)
1689 return changed
1693 return changed
1690
1694
1691 class memctx(committablectx):
1695 class memctx(committablectx):
1692 """Use memctx to perform in-memory commits via localrepo.commitctx().
1696 """Use memctx to perform in-memory commits via localrepo.commitctx().
1693
1697
1694 Revision information is supplied at initialization time while
1698 Revision information is supplied at initialization time while
1695 related files data and is made available through a callback
1699 related files data and is made available through a callback
1696 mechanism. 'repo' is the current localrepo, 'parents' is a
1700 mechanism. 'repo' is the current localrepo, 'parents' is a
1697 sequence of two parent revisions identifiers (pass None for every
1701 sequence of two parent revisions identifiers (pass None for every
1698 missing parent), 'text' is the commit message and 'files' lists
1702 missing parent), 'text' is the commit message and 'files' lists
1699 names of files touched by the revision (normalized and relative to
1703 names of files touched by the revision (normalized and relative to
1700 repository root).
1704 repository root).
1701
1705
1702 filectxfn(repo, memctx, path) is a callable receiving the
1706 filectxfn(repo, memctx, path) is a callable receiving the
1703 repository, the current memctx object and the normalized path of
1707 repository, the current memctx object and the normalized path of
1704 requested file, relative to repository root. It is fired by the
1708 requested file, relative to repository root. It is fired by the
1705 commit function for every file in 'files', but calls order is
1709 commit function for every file in 'files', but calls order is
1706 undefined. If the file is available in the revision being
1710 undefined. If the file is available in the revision being
1707 committed (updated or added), filectxfn returns a memfilectx
1711 committed (updated or added), filectxfn returns a memfilectx
1708 object. If the file was removed, filectxfn raises an
1712 object. If the file was removed, filectxfn raises an
1709 IOError. Moved files are represented by marking the source file
1713 IOError. Moved files are represented by marking the source file
1710 removed and the new file added with copy information (see
1714 removed and the new file added with copy information (see
1711 memfilectx).
1715 memfilectx).
1712
1716
1713 user receives the committer name and defaults to current
1717 user receives the committer name and defaults to current
1714 repository username, date is the commit date in any format
1718 repository username, date is the commit date in any format
1715 supported by util.parsedate() and defaults to current date, extra
1719 supported by util.parsedate() and defaults to current date, extra
1716 is a dictionary of metadata or is left empty.
1720 is a dictionary of metadata or is left empty.
1717 """
1721 """
1718
1722
1719 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1723 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1720 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1724 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1721 # this field to determine what to do in filectxfn.
1725 # this field to determine what to do in filectxfn.
1722 _returnnoneformissingfiles = True
1726 _returnnoneformissingfiles = True
1723
1727
1724 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1728 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1725 date=None, extra=None, editor=False):
1729 date=None, extra=None, editor=False):
1726 super(memctx, self).__init__(repo, text, user, date, extra)
1730 super(memctx, self).__init__(repo, text, user, date, extra)
1727 self._rev = None
1731 self._rev = None
1728 self._node = None
1732 self._node = None
1729 parents = [(p or nullid) for p in parents]
1733 parents = [(p or nullid) for p in parents]
1730 p1, p2 = parents
1734 p1, p2 = parents
1731 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1735 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1732 files = sorted(set(files))
1736 files = sorted(set(files))
1733 self._files = files
1737 self._files = files
1734 self.substate = {}
1738 self.substate = {}
1735
1739
1736 # if store is not callable, wrap it in a function
1740 # if store is not callable, wrap it in a function
1737 if not callable(filectxfn):
1741 if not callable(filectxfn):
1738 def getfilectx(repo, memctx, path):
1742 def getfilectx(repo, memctx, path):
1739 fctx = filectxfn[path]
1743 fctx = filectxfn[path]
1740 # this is weird but apparently we only keep track of one parent
1744 # this is weird but apparently we only keep track of one parent
1741 # (why not only store that instead of a tuple?)
1745 # (why not only store that instead of a tuple?)
1742 copied = fctx.renamed()
1746 copied = fctx.renamed()
1743 if copied:
1747 if copied:
1744 copied = copied[0]
1748 copied = copied[0]
1745 return memfilectx(repo, path, fctx.data(),
1749 return memfilectx(repo, path, fctx.data(),
1746 islink=fctx.islink(), isexec=fctx.isexec(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx
        else:
            # "util.cachefunc" reduces invocation of possibly expensive
            # "filectxfn" for performance (e.g. converting from another VCS)
            self._filectxfn = util.cachefunc(filectxfn)

        self._extra = extra and extra.copy() or {}
        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if the file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].node()
            if len(p) > 1:
                p2node = p[1].node()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used to check for the
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized to a list whose length is always 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

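The class above is typically driven from client code roughly as in the sketch below. This is an illustrative example and not part of context.py: the file names, commit message, and user are invented, it assumes a `repo` object obtained through the usual Mercurial APIs, and it relies only on the constructor signatures visible in this file (memfilectx as defined just below, and memctx taking repo, parents, text, files, filectxfn plus optional user/date/extra/editor).

    from mercurial import context
    from mercurial.node import nullid

    def getfilectx(repo, memctx, path):
        # Per memctx.filectx() above: returning None means "this path
        # should be removed in the new changeset".
        if path == 'obsolete.txt':            # hypothetical file to delete
            return None
        return context.memfilectx(repo, path, 'new contents\n',
                                  islink=False, isexec=False,
                                  copied=None, memctx=memctx)

    p1 = repo['tip']                          # assumes an existing `repo`
    mctx = context.memctx(repo, (p1.node(), nullid),
                          'example: rewrite two files in memory',
                          ['changed.txt', 'obsolete.txt'], getfilectx,
                          user='someone@example.com')
    newnode = mctx.commit()                   # delegates to repo.commitctx(self)
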
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data
    def size(self):
        return len(self.data())
    def flags(self):
        return self._flags
    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
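
The `_flags` expression in memfilectx.__init__ packs the symlink and exec bits into the same zero-, one-, or two-character string that manifests use. The helper below is not part of context.py; it only restates that expression to make the encoding explicit:

    def flagstr(islink, isexec):
        # mirrors: (islink and 'l' or '') + (isexec and 'x' or '')
        return (islink and 'l' or '') + (isexec and 'x' or '')

    assert flagstr(False, False) == ''    # regular file
    assert flagstr(False, True) == 'x'    # executable file
    assert flagstr(True, False) == 'l'    # symbolic link
    assert flagstr(True, True) == 'lx'    # executable symlink (unusual, but representable)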