context: use unfiltered repo for '.'...
Martin von Zweigbergk
r24050:a9b61dbd default
@@ -1,1866 +1,1868 @@
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, short, hex, bin
8 from node import nullid, nullrev, short, hex, bin
9 from i18n import _
9 from i18n import _
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 import match as matchmod
11 import match as matchmod
12 import os, errno, stat
12 import os, errno, stat
13 import obsolete as obsmod
13 import obsolete as obsmod
14 import repoview
14 import repoview
15 import fileset
15 import fileset
16 import revlog
16 import revlog
17
17
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19
19
20 # Phony node value to stand in for new files in some uses of
20 # Phony node value to stand in for new files in some uses of
21 # manifests. Manifests support 21-byte hashes for nodes which are
21 # manifests. Manifests support 21-byte hashes for nodes which are
22 # dirty in the working copy.
22 # dirty in the working copy.
23 _newnode = '!' * 21
23 _newnode = '!' * 21
24
24
25 class basectx(object):
25 class basectx(object):
26 """A basectx object represents the common logic for its children:
26 """A basectx object represents the common logic for its children:
27 changectx: read-only context that is already present in the repo,
27 changectx: read-only context that is already present in the repo,
28 workingctx: a context that represents the working directory and can
28 workingctx: a context that represents the working directory and can
29 be committed,
29 be committed,
30 memctx: a context that represents changes in-memory and can also
30 memctx: a context that represents changes in-memory and can also
31 be committed."""
31 be committed."""
32 def __new__(cls, repo, changeid='', *args, **kwargs):
32 def __new__(cls, repo, changeid='', *args, **kwargs):
33 if isinstance(changeid, basectx):
33 if isinstance(changeid, basectx):
34 return changeid
34 return changeid
35
35
36 o = super(basectx, cls).__new__(cls)
36 o = super(basectx, cls).__new__(cls)
37
37
38 o._repo = repo
38 o._repo = repo
39 o._rev = nullrev
39 o._rev = nullrev
40 o._node = nullid
40 o._node = nullid
41
41
42 return o
42 return o
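
Because __new__ above returns the argument unchanged whenever it is already a context, wrapping an existing context in another constructor call hands back the very same object instead of building a copy. A quick sketch of that behaviour (assumes the current directory is a Mercurial checkout; not part of context.py):

from mercurial import ui, hg, context

repo = hg.repository(ui.ui(), '.')         # assumption: cwd is an hg repo
tip = repo['tip']                          # a changectx built from a changeid
assert context.changectx(repo, tip) is tip # __new__ short-circuits and
                                           # returns the existing context
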
43
43
44 def __str__(self):
44 def __str__(self):
45 return short(self.node())
45 return short(self.node())
46
46
47 def __int__(self):
47 def __int__(self):
48 return self.rev()
48 return self.rev()
49
49
50 def __repr__(self):
50 def __repr__(self):
51 return "<%s %s>" % (type(self).__name__, str(self))
51 return "<%s %s>" % (type(self).__name__, str(self))
52
52
53 def __eq__(self, other):
53 def __eq__(self, other):
54 try:
54 try:
55 return type(self) == type(other) and self._rev == other._rev
55 return type(self) == type(other) and self._rev == other._rev
56 except AttributeError:
56 except AttributeError:
57 return False
57 return False
58
58
59 def __ne__(self, other):
59 def __ne__(self, other):
60 return not (self == other)
60 return not (self == other)
61
61
62 def __contains__(self, key):
62 def __contains__(self, key):
63 return key in self._manifest
63 return key in self._manifest
64
64
65 def __getitem__(self, key):
65 def __getitem__(self, key):
66 return self.filectx(key)
66 return self.filectx(key)
67
67
68 def __iter__(self):
68 def __iter__(self):
69 for f in sorted(self._manifest):
69 for f in sorted(self._manifest):
70 yield f
70 yield f
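
With __contains__, __getitem__ and __iter__ defined as above, a context behaves like a read-only mapping from file names to file contexts backed by the manifest. A small usage sketch (the repository path and the file names are placeholders, not part of context.py):

from mercurial import ui, hg

repo = hg.repository(ui.ui(), '.')        # assumption: cwd is an hg repo
ctx = repo['tip']

print 'README' in ctx                     # __contains__: manifest membership
fctx = ctx['mercurial/context.py']        # __getitem__: a filectx (placeholder path)
for f in ctx:                             # __iter__: sorted manifest file names
    print f
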
71
71
72 def _manifestmatches(self, match, s):
72 def _manifestmatches(self, match, s):
73 """generate a new manifest filtered by the match argument
73 """generate a new manifest filtered by the match argument
74
74
75 This method is for internal use only and mainly exists to provide an
75 This method is for internal use only and mainly exists to provide an
76 object oriented way for other contexts to customize the manifest
76 object oriented way for other contexts to customize the manifest
77 generation.
77 generation.
78 """
78 """
79 return self.manifest().matches(match)
79 return self.manifest().matches(match)
80
80
81 def _matchstatus(self, other, match):
81 def _matchstatus(self, other, match):
82 """return match.always if match is None
82 """return match.always if match is None
83
83
84 This internal method provides a way for child objects to override the
84 This internal method provides a way for child objects to override the
85 match operator.
85 match operator.
86 """
86 """
87 return match or matchmod.always(self._repo.root, self._repo.getcwd())
87 return match or matchmod.always(self._repo.root, self._repo.getcwd())
88
88
89 def _buildstatus(self, other, s, match, listignored, listclean,
89 def _buildstatus(self, other, s, match, listignored, listclean,
90 listunknown):
90 listunknown):
91 """build a status with respect to another context"""
91 """build a status with respect to another context"""
92 # Load earliest manifest first for caching reasons. More specifically,
92 # Load earliest manifest first for caching reasons. More specifically,
93 # if you have revisions 1000 and 1001, 1001 is probably stored as a
93 # if you have revisions 1000 and 1001, 1001 is probably stored as a
94 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
94 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
95 # 1000 and cache it so that when you read 1001, we just need to apply a
95 # 1000 and cache it so that when you read 1001, we just need to apply a
96 # delta to what's in the cache. So that's one full reconstruction + one
96 # delta to what's in the cache. So that's one full reconstruction + one
97 # delta application.
97 # delta application.
98 if self.rev() is not None and self.rev() < other.rev():
98 if self.rev() is not None and self.rev() < other.rev():
99 self.manifest()
99 self.manifest()
100 mf1 = other._manifestmatches(match, s)
100 mf1 = other._manifestmatches(match, s)
101 mf2 = self._manifestmatches(match, s)
101 mf2 = self._manifestmatches(match, s)
102
102
103 modified, added = [], []
103 modified, added = [], []
104 removed = []
104 removed = []
105 clean = []
105 clean = []
106 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
106 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
107 deletedset = set(deleted)
107 deletedset = set(deleted)
108 d = mf1.diff(mf2, clean=listclean)
108 d = mf1.diff(mf2, clean=listclean)
109 for fn, value in d.iteritems():
109 for fn, value in d.iteritems():
110 if fn in deletedset:
110 if fn in deletedset:
111 continue
111 continue
112 if value is None:
112 if value is None:
113 clean.append(fn)
113 clean.append(fn)
114 continue
114 continue
115 (node1, flag1), (node2, flag2) = value
115 (node1, flag1), (node2, flag2) = value
116 if node1 is None:
116 if node1 is None:
117 added.append(fn)
117 added.append(fn)
118 elif node2 is None:
118 elif node2 is None:
119 removed.append(fn)
119 removed.append(fn)
120 elif node2 != _newnode:
120 elif node2 != _newnode:
121 # The file was not a new file in mf2, so an entry
121 # The file was not a new file in mf2, so an entry
122 # from diff is really a difference.
122 # from diff is really a difference.
123 modified.append(fn)
123 modified.append(fn)
124 elif self[fn].cmp(other[fn]):
124 elif self[fn].cmp(other[fn]):
125 # node2 was newnode, but the working file doesn't
125 # node2 was newnode, but the working file doesn't
126 # match the one in mf1.
126 # match the one in mf1.
127 modified.append(fn)
127 modified.append(fn)
128 else:
128 else:
129 clean.append(fn)
129 clean.append(fn)
130
130
131 if removed:
131 if removed:
132 # need to filter files if they are already reported as removed
132 # need to filter files if they are already reported as removed
133 unknown = [fn for fn in unknown if fn not in mf1]
133 unknown = [fn for fn in unknown if fn not in mf1]
134 ignored = [fn for fn in ignored if fn not in mf1]
134 ignored = [fn for fn in ignored if fn not in mf1]
135 # if they're deleted, don't report them as removed
135 # if they're deleted, don't report them as removed
136 removed = [fn for fn in removed if fn not in deletedset]
136 removed = [fn for fn in removed if fn not in deletedset]
137
137
138 return scmutil.status(modified, added, removed, deleted, unknown,
138 return scmutil.status(modified, added, removed, deleted, unknown,
139 ignored, clean)
139 ignored, clean)
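
The loop above classifies each entry of the manifest diff: no old node means "added", no new node means "removed", a concrete new node means "modified", and the _newnode placeholder (a dirty working-copy file) falls back to a content comparison; files already reported as deleted are skipped. A standalone, simplified illustration of that bucketing, using plain dicts instead of real manifests (the classify helper is invented for this sketch):

_newnode = '!' * 21

def classify(old, new, changed_on_disk=()):
    """Toy version of the classification in _buildstatus above.

    old and new map filename -> node; changed_on_disk names files whose
    working-copy content differs from the old revision."""
    added, removed, modified, clean = [], [], [], []
    for fn in set(old) | set(new):
        n1, n2 = old.get(fn), new.get(fn)
        if n1 == n2 and n2 != _newnode:
            clean.append(fn)
        elif n1 is None:
            added.append(fn)
        elif n2 is None:
            removed.append(fn)
        elif n2 != _newnode or fn in changed_on_disk:
            modified.append(fn)
        else:
            clean.append(fn)
    return added, removed, modified, clean

print classify({'a': 'n1', 'b': 'n2'},
               {'a': 'n1', 'b': _newnode, 'c': 'n3'},
               changed_on_disk=('b',))
# -> (['c'], [], ['b'], ['a'])
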
140
140
141 @propertycache
141 @propertycache
142 def substate(self):
142 def substate(self):
143 return subrepo.state(self, self._repo.ui)
143 return subrepo.state(self, self._repo.ui)
144
144
145 def subrev(self, subpath):
145 def subrev(self, subpath):
146 return self.substate[subpath][1]
146 return self.substate[subpath][1]
147
147
148 def rev(self):
148 def rev(self):
149 return self._rev
149 return self._rev
150 def node(self):
150 def node(self):
151 return self._node
151 return self._node
152 def hex(self):
152 def hex(self):
153 return hex(self.node())
153 return hex(self.node())
154 def manifest(self):
154 def manifest(self):
155 return self._manifest
155 return self._manifest
156 def phasestr(self):
156 def phasestr(self):
157 return phases.phasenames[self.phase()]
157 return phases.phasenames[self.phase()]
158 def mutable(self):
158 def mutable(self):
159 return self.phase() > phases.public
159 return self.phase() > phases.public
160
160
161 def getfileset(self, expr):
161 def getfileset(self, expr):
162 return fileset.getfileset(self, expr)
162 return fileset.getfileset(self, expr)
163
163
164 def obsolete(self):
164 def obsolete(self):
165 """True if the changeset is obsolete"""
165 """True if the changeset is obsolete"""
166 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
166 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
167
167
168 def extinct(self):
168 def extinct(self):
169 """True if the changeset is extinct"""
169 """True if the changeset is extinct"""
170 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
170 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
171
171
172 def unstable(self):
172 def unstable(self):
173 """True if the changeset is not obsolete but its ancestors are"""
173 """True if the changeset is not obsolete but its ancestors are"""
174 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
174 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
175
175
176 def bumped(self):
176 def bumped(self):
177 """True if the changeset tries to be a successor of a public changeset
177 """True if the changeset tries to be a successor of a public changeset
178
178
179 Only non-public and non-obsolete changesets may be bumped.
179 Only non-public and non-obsolete changesets may be bumped.
180 """
180 """
181 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
181 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
182
182
183 def divergent(self):
183 def divergent(self):
184 """Is a successor of a changeset with multiple possible successor sets
184 """Is a successor of a changeset with multiple possible successor sets
185
185
186 Only non-public and non-obsolete changesets may be divergent.
186 Only non-public and non-obsolete changesets may be divergent.
187 """
187 """
188 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
188 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
189
189
190 def troubled(self):
190 def troubled(self):
191 """True if the changeset is either unstable, bumped or divergent"""
191 """True if the changeset is either unstable, bumped or divergent"""
192 return self.unstable() or self.bumped() or self.divergent()
192 return self.unstable() or self.bumped() or self.divergent()
193
193
194 def troubles(self):
194 def troubles(self):
195 """return the list of troubles affecting this changeset.
195 """return the list of troubles affecting this changeset.
196
196
197 Troubles are returned as strings. Possible values are:
197 Troubles are returned as strings. Possible values are:
198 - unstable,
198 - unstable,
199 - bumped,
199 - bumped,
200 - divergent.
200 - divergent.
201 """
201 """
202 troubles = []
202 troubles = []
203 if self.unstable():
203 if self.unstable():
204 troubles.append('unstable')
204 troubles.append('unstable')
205 if self.bumped():
205 if self.bumped():
206 troubles.append('bumped')
206 troubles.append('bumped')
207 if self.divergent():
207 if self.divergent():
208 troubles.append('divergent')
208 troubles.append('divergent')
209 return troubles
209 return troubles
210
210
211 def parents(self):
211 def parents(self):
212 """return contexts for each parent changeset"""
212 """return contexts for each parent changeset"""
213 return self._parents
213 return self._parents
214
214
215 def p1(self):
215 def p1(self):
216 return self._parents[0]
216 return self._parents[0]
217
217
218 def p2(self):
218 def p2(self):
219 if len(self._parents) == 2:
219 if len(self._parents) == 2:
220 return self._parents[1]
220 return self._parents[1]
221 return changectx(self._repo, -1)
221 return changectx(self._repo, -1)
222
222
223 def _fileinfo(self, path):
223 def _fileinfo(self, path):
224 if '_manifest' in self.__dict__:
224 if '_manifest' in self.__dict__:
225 try:
225 try:
226 return self._manifest[path], self._manifest.flags(path)
226 return self._manifest[path], self._manifest.flags(path)
227 except KeyError:
227 except KeyError:
228 raise error.ManifestLookupError(self._node, path,
228 raise error.ManifestLookupError(self._node, path,
229 _('not found in manifest'))
229 _('not found in manifest'))
230 if '_manifestdelta' in self.__dict__ or path in self.files():
230 if '_manifestdelta' in self.__dict__ or path in self.files():
231 if path in self._manifestdelta:
231 if path in self._manifestdelta:
232 return (self._manifestdelta[path],
232 return (self._manifestdelta[path],
233 self._manifestdelta.flags(path))
233 self._manifestdelta.flags(path))
234 node, flag = self._repo.manifest.find(self._changeset[0], path)
234 node, flag = self._repo.manifest.find(self._changeset[0], path)
235 if not node:
235 if not node:
236 raise error.ManifestLookupError(self._node, path,
236 raise error.ManifestLookupError(self._node, path,
237 _('not found in manifest'))
237 _('not found in manifest'))
238
238
239 return node, flag
239 return node, flag
240
240
241 def filenode(self, path):
241 def filenode(self, path):
242 return self._fileinfo(path)[0]
242 return self._fileinfo(path)[0]
243
243
244 def flags(self, path):
244 def flags(self, path):
245 try:
245 try:
246 return self._fileinfo(path)[1]
246 return self._fileinfo(path)[1]
247 except error.LookupError:
247 except error.LookupError:
248 return ''
248 return ''
249
249
250 def sub(self, path):
250 def sub(self, path):
251 return subrepo.subrepo(self, path)
251 return subrepo.subrepo(self, path)
252
252
253 def match(self, pats=[], include=None, exclude=None, default='glob'):
253 def match(self, pats=[], include=None, exclude=None, default='glob'):
254 r = self._repo
254 r = self._repo
255 return matchmod.match(r.root, r.getcwd(), pats,
255 return matchmod.match(r.root, r.getcwd(), pats,
256 include, exclude, default,
256 include, exclude, default,
257 auditor=r.auditor, ctx=self)
257 auditor=r.auditor, ctx=self)
258
258
259 def diff(self, ctx2=None, match=None, **opts):
259 def diff(self, ctx2=None, match=None, **opts):
260 """Returns a diff generator for the given contexts and matcher"""
260 """Returns a diff generator for the given contexts and matcher"""
261 if ctx2 is None:
261 if ctx2 is None:
262 ctx2 = self.p1()
262 ctx2 = self.p1()
263 if ctx2 is not None:
263 if ctx2 is not None:
264 ctx2 = self._repo[ctx2]
264 ctx2 = self._repo[ctx2]
265 diffopts = patch.diffopts(self._repo.ui, opts)
265 diffopts = patch.diffopts(self._repo.ui, opts)
266 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
266 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
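
diff() simply feeds the matcher and the parsed diff options into patch.diff, which yields the patch text as a sequence of string chunks; with no ctx2 the comparison is against the first parent. A usage sketch (repository path and the git option are example choices, not part of context.py):

import sys
from mercurial import ui, hg

repo = hg.repository(ui.ui(), '.')   # assumption: cwd is an hg repo
ctx = repo['tip']

# Compare tip against its first parent and print the generated patch chunks.
for chunk in ctx.diff(git=True):
    sys.stdout.write(chunk)
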
267
267
268 @propertycache
268 @propertycache
269 def _dirs(self):
269 def _dirs(self):
270 return scmutil.dirs(self._manifest)
270 return scmutil.dirs(self._manifest)
271
271
272 def dirs(self):
272 def dirs(self):
273 return self._dirs
273 return self._dirs
274
274
275 def dirty(self, missing=False, merge=True, branch=True):
275 def dirty(self, missing=False, merge=True, branch=True):
276 return False
276 return False
277
277
278 def status(self, other=None, match=None, listignored=False,
278 def status(self, other=None, match=None, listignored=False,
279 listclean=False, listunknown=False, listsubrepos=False):
279 listclean=False, listunknown=False, listsubrepos=False):
280 """return status of files between two nodes or node and working
280 """return status of files between two nodes or node and working
281 directory.
281 directory.
282
282
283 If other is None, compare this node with working directory.
283 If other is None, compare this node with working directory.
284
284
285 returns (modified, added, removed, deleted, unknown, ignored, clean)
285 returns (modified, added, removed, deleted, unknown, ignored, clean)
286 """
286 """
287
287
288 ctx1 = self
288 ctx1 = self
289 ctx2 = self._repo[other]
289 ctx2 = self._repo[other]
290
290
291 # This next code block is, admittedly, fragile logic that tests for
291 # This next code block is, admittedly, fragile logic that tests for
292 # reversing the contexts and wouldn't need to exist if it weren't for
292 # reversing the contexts and wouldn't need to exist if it weren't for
293 # the fast (and common) code path of comparing the working directory
293 # the fast (and common) code path of comparing the working directory
294 # with its first parent.
294 # with its first parent.
295 #
295 #
296 # What we're aiming for here is the ability to call:
296 # What we're aiming for here is the ability to call:
297 #
297 #
298 # workingctx.status(parentctx)
298 # workingctx.status(parentctx)
299 #
299 #
300 # If we always built the manifest for each context and compared those,
300 # If we always built the manifest for each context and compared those,
301 # then we'd be done. But the special case of the above call means we
301 # then we'd be done. But the special case of the above call means we
302 # just copy the manifest of the parent.
302 # just copy the manifest of the parent.
303 reversed = False
303 reversed = False
304 if (not isinstance(ctx1, changectx)
304 if (not isinstance(ctx1, changectx)
305 and isinstance(ctx2, changectx)):
305 and isinstance(ctx2, changectx)):
306 reversed = True
306 reversed = True
307 ctx1, ctx2 = ctx2, ctx1
307 ctx1, ctx2 = ctx2, ctx1
308
308
309 match = ctx2._matchstatus(ctx1, match)
309 match = ctx2._matchstatus(ctx1, match)
310 r = scmutil.status([], [], [], [], [], [], [])
310 r = scmutil.status([], [], [], [], [], [], [])
311 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
311 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
312 listunknown)
312 listunknown)
313
313
314 if reversed:
314 if reversed:
315 # Reverse added and removed. Clear deleted, unknown and ignored as
315 # Reverse added and removed. Clear deleted, unknown and ignored as
316 # these make no sense to reverse.
316 # these make no sense to reverse.
317 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
317 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
318 r.clean)
318 r.clean)
319
319
320 if listsubrepos:
320 if listsubrepos:
321 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
321 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
322 rev2 = ctx2.subrev(subpath)
322 rev2 = ctx2.subrev(subpath)
323 try:
323 try:
324 submatch = matchmod.narrowmatcher(subpath, match)
324 submatch = matchmod.narrowmatcher(subpath, match)
325 s = sub.status(rev2, match=submatch, ignored=listignored,
325 s = sub.status(rev2, match=submatch, ignored=listignored,
326 clean=listclean, unknown=listunknown,
326 clean=listclean, unknown=listunknown,
327 listsubrepos=True)
327 listsubrepos=True)
328 for rfiles, sfiles in zip(r, s):
328 for rfiles, sfiles in zip(r, s):
329 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
329 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
330 except error.LookupError:
330 except error.LookupError:
331 self._repo.ui.status(_("skipping missing "
331 self._repo.ui.status(_("skipping missing "
332 "subrepository: %s\n") % subpath)
332 "subrepository: %s\n") % subpath)
333
333
334 for l in r:
334 for l in r:
335 l.sort()
335 l.sort()
336
336
337 return r
337 return r
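
Because of the swap above, asking a working context for its status against a plain changeset computes the comparison in the cheap direction and then flips added and removed, while deleted, unknown and ignored are cleared since they only describe a working directory. A usage sketch (the repository path is an assumption; not part of context.py):

from mercurial import ui, hg

repo = hg.repository(ui.ui(), '.')   # assumption: cwd is an hg repo
wctx = repo[None]                    # working directory context
pctx = wctx.p1()                     # its first parent (a changectx)

st = pctx.status(wctx)               # changes going from the parent to the wdir
print st.added                       # files that exist only in the working dir

st2 = wctx.status(pctx)              # same comparison, asked from the wdir side:
print st2.removed                    # the reversed branch swaps added/removed,
                                     # so those new files now show up here
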
338
338
339
339
340 def makememctx(repo, parents, text, user, date, branch, files, store,
340 def makememctx(repo, parents, text, user, date, branch, files, store,
341 editor=None):
341 editor=None):
342 def getfilectx(repo, memctx, path):
342 def getfilectx(repo, memctx, path):
343 data, mode, copied = store.getfile(path)
343 data, mode, copied = store.getfile(path)
344 if data is None:
344 if data is None:
345 return None
345 return None
346 islink, isexec = mode
346 islink, isexec = mode
347 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
347 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
348 copied=copied, memctx=memctx)
348 copied=copied, memctx=memctx)
349 extra = {}
349 extra = {}
350 if branch:
350 if branch:
351 extra['branch'] = encoding.fromlocal(branch)
351 extra['branch'] = encoding.fromlocal(branch)
352 ctx = memctx(repo, parents, text, files, getfilectx, user,
352 ctx = memctx(repo, parents, text, files, getfilectx, user,
353 date, extra, editor)
353 date, extra, editor)
354 return ctx
354 return ctx
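
makememctx turns a "store" object, whose getfile(path) returns (data, (islink, isexec), copysource) or None data for a removed file, into a committable memctx. A hedged sketch with a throwaway in-memory store (trivialstore and the file contents are invented for illustration; assumes repo is an open localrepository):

from mercurial import ui, hg

repo = hg.repository(ui.ui(), '.')        # assumption: cwd is an hg repo

class trivialstore(object):
    """Throwaway stand-in for the store argument (not a Mercurial class)."""
    def __init__(self, files):
        self._files = files               # path -> data
    def getfile(self, path):
        # data, (islink, isexec), copy source
        return self._files[path], (False, False), None

store = trivialstore({'hello.txt': 'hello\n'})
ctx = makememctx(repo, (repo['.'].node(), None), "add hello.txt",
                 "An Author <author@example.com>", None, None,
                 ['hello.txt'], store)
node = repo.commitctx(ctx)                # commits the in-memory changeset
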
355
355
356 class changectx(basectx):
356 class changectx(basectx):
357 """A changecontext object makes access to data related to a particular
357 """A changecontext object makes access to data related to a particular
358 changeset convenient. It represents a read-only context already present in
358 changeset convenient. It represents a read-only context already present in
359 the repo."""
359 the repo."""
360 def __init__(self, repo, changeid=''):
360 def __init__(self, repo, changeid=''):
361 """changeid is a revision number, node, or tag"""
361 """changeid is a revision number, node, or tag"""
362
362
363 # since basectx.__new__ already took care of copying the object, we
363 # since basectx.__new__ already took care of copying the object, we
364 # don't need to do anything in __init__, so we just exit here
364 # don't need to do anything in __init__, so we just exit here
365 if isinstance(changeid, basectx):
365 if isinstance(changeid, basectx):
366 return
366 return
367
367
368 if changeid == '':
368 if changeid == '':
369 changeid = '.'
369 changeid = '.'
370 self._repo = repo
370 self._repo = repo
371
371
372 try:
372 try:
373 if isinstance(changeid, int):
373 if isinstance(changeid, int):
374 self._node = repo.changelog.node(changeid)
374 self._node = repo.changelog.node(changeid)
375 self._rev = changeid
375 self._rev = changeid
376 return
376 return
377 if isinstance(changeid, long):
377 if isinstance(changeid, long):
378 changeid = str(changeid)
378 changeid = str(changeid)
379 - if changeid == '.':
380 - self._node = repo.dirstate.p1()
381 - self._rev = repo.changelog.rev(self._node)
382 - return
383 if changeid == 'null':
379 if changeid == 'null':
384 self._node = nullid
380 self._node = nullid
385 self._rev = nullrev
381 self._rev = nullrev
386 return
382 return
387 if changeid == 'tip':
383 if changeid == 'tip':
388 self._node = repo.changelog.tip()
384 self._node = repo.changelog.tip()
389 self._rev = repo.changelog.rev(self._node)
385 self._rev = repo.changelog.rev(self._node)
390 return
386 return
387 + if changeid == '.' or changeid == repo.dirstate.p1():
388 + # this is a hack to delay/avoid loading obsmarkers
389 + # when we know that '.' won't be hidden
390 + self._node = repo.dirstate.p1()
391 + self._rev = repo.unfiltered().changelog.rev(self._node)
392 + return
391 if len(changeid) == 20:
393 if len(changeid) == 20:
392 try:
394 try:
393 self._node = changeid
395 self._node = changeid
394 self._rev = repo.changelog.rev(changeid)
396 self._rev = repo.changelog.rev(changeid)
395 return
397 return
396 except error.FilteredRepoLookupError:
398 except error.FilteredRepoLookupError:
397 raise
399 raise
398 except LookupError:
400 except LookupError:
399 pass
401 pass
400
402
401 try:
403 try:
402 r = int(changeid)
404 r = int(changeid)
403 if str(r) != changeid:
405 if str(r) != changeid:
404 raise ValueError
406 raise ValueError
405 l = len(repo.changelog)
407 l = len(repo.changelog)
406 if r < 0:
408 if r < 0:
407 r += l
409 r += l
408 if r < 0 or r >= l:
410 if r < 0 or r >= l:
409 raise ValueError
411 raise ValueError
410 self._rev = r
412 self._rev = r
411 self._node = repo.changelog.node(r)
413 self._node = repo.changelog.node(r)
412 return
414 return
413 except error.FilteredIndexError:
415 except error.FilteredIndexError:
414 raise
416 raise
415 except (ValueError, OverflowError, IndexError):
417 except (ValueError, OverflowError, IndexError):
416 pass
418 pass
417
419
418 if len(changeid) == 40:
420 if len(changeid) == 40:
419 try:
421 try:
420 self._node = bin(changeid)
422 self._node = bin(changeid)
421 self._rev = repo.changelog.rev(self._node)
423 self._rev = repo.changelog.rev(self._node)
422 return
424 return
423 except error.FilteredLookupError:
425 except error.FilteredLookupError:
424 raise
426 raise
425 except (TypeError, LookupError):
427 except (TypeError, LookupError):
426 pass
428 pass
427
429
428 # lookup bookmarks through the name interface
430 # lookup bookmarks through the name interface
429 try:
431 try:
430 self._node = repo.names.singlenode(repo, changeid)
432 self._node = repo.names.singlenode(repo, changeid)
431 self._rev = repo.changelog.rev(self._node)
433 self._rev = repo.changelog.rev(self._node)
432 return
434 return
433 except KeyError:
435 except KeyError:
434 pass
436 pass
435 except error.FilteredRepoLookupError:
437 except error.FilteredRepoLookupError:
436 raise
438 raise
437 except error.RepoLookupError:
439 except error.RepoLookupError:
438 pass
440 pass
439
441
440 self._node = repo.unfiltered().changelog._partialmatch(changeid)
442 self._node = repo.unfiltered().changelog._partialmatch(changeid)
441 if self._node is not None:
443 if self._node is not None:
442 self._rev = repo.changelog.rev(self._node)
444 self._rev = repo.changelog.rev(self._node)
443 return
445 return
444
446
445 # lookup failed
447 # lookup failed
446 # check if it might have come from damaged dirstate
448 # check if it might have come from damaged dirstate
447 #
449 #
448 # XXX we could avoid the unfiltered if we had a recognizable
450 # XXX we could avoid the unfiltered if we had a recognizable
449 # exception for filtered changeset access
451 # exception for filtered changeset access
450 if changeid in repo.unfiltered().dirstate.parents():
452 if changeid in repo.unfiltered().dirstate.parents():
451 msg = _("working directory has unknown parent '%s'!")
453 msg = _("working directory has unknown parent '%s'!")
452 raise error.Abort(msg % short(changeid))
454 raise error.Abort(msg % short(changeid))
453 try:
455 try:
454 if len(changeid) == 20:
456 if len(changeid) == 20:
455 changeid = hex(changeid)
457 changeid = hex(changeid)
456 except TypeError:
458 except TypeError:
457 pass
459 pass
458 except (error.FilteredIndexError, error.FilteredLookupError,
460 except (error.FilteredIndexError, error.FilteredLookupError,
459 error.FilteredRepoLookupError):
461 error.FilteredRepoLookupError):
460 if repo.filtername == 'visible':
462 if repo.filtername == 'visible':
461 msg = _("hidden revision '%s'") % changeid
463 msg = _("hidden revision '%s'") % changeid
462 hint = _('use --hidden to access hidden revisions')
464 hint = _('use --hidden to access hidden revisions')
463 raise error.FilteredRepoLookupError(msg, hint=hint)
465 raise error.FilteredRepoLookupError(msg, hint=hint)
464 msg = _("filtered revision '%s' (not in '%s' subset)")
466 msg = _("filtered revision '%s' (not in '%s' subset)")
465 msg %= (changeid, repo.filtername)
467 msg %= (changeid, repo.filtername)
466 raise error.FilteredRepoLookupError(msg)
468 raise error.FilteredRepoLookupError(msg)
467 except IndexError:
469 except IndexError:
468 pass
470 pass
469 raise error.RepoLookupError(
471 raise error.RepoLookupError(
470 _("unknown revision '%s'") % changeid)
472 _("unknown revision '%s'") % changeid)
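
Taken together, the cascade above resolves a changeid in a fixed order: integer revision, long, the literal names 'null' and 'tip', the working-directory parent ('.', now matched against repo.dirstate.p1() and resolved through the unfiltered changelog so the hidden-revision filter and obsmarkers are not loaded for this common case), a 20-byte binary node, a decimal revision string, a 40-character hex node, the names interface (bookmarks, tags, branches), and finally a unique hex prefix. A hedged sketch of the entry points that all funnel into this constructor (repository path and the hex prefix are placeholders; not part of context.py):

from mercurial import ui, hg

repo = hg.repository(ui.ui(), '.')   # assumption: cwd is an hg repo

# Every lookup below goes through changectx.__init__ above:
repo[0]            # integer revision number
repo['null']       # the null revision
repo['tip']        # tip of the (filtered) changelog
repo['.']          # working directory parent; the fast path resolves it
                   # through the unfiltered changelog
repo['0a1b2c3d']   # placeholder hex prefix, resolved via _partialmatch last
                   # (raises RepoLookupError if no such prefix exists)
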
471
473
472 def __hash__(self):
474 def __hash__(self):
473 try:
475 try:
474 return hash(self._rev)
476 return hash(self._rev)
475 except AttributeError:
477 except AttributeError:
476 return id(self)
478 return id(self)
477
479
478 def __nonzero__(self):
480 def __nonzero__(self):
479 return self._rev != nullrev
481 return self._rev != nullrev
480
482
481 @propertycache
483 @propertycache
482 def _changeset(self):
484 def _changeset(self):
483 return self._repo.changelog.read(self.rev())
485 return self._repo.changelog.read(self.rev())
484
486
485 @propertycache
487 @propertycache
486 def _manifest(self):
488 def _manifest(self):
487 return self._repo.manifest.read(self._changeset[0])
489 return self._repo.manifest.read(self._changeset[0])
488
490
489 @propertycache
491 @propertycache
490 def _manifestdelta(self):
492 def _manifestdelta(self):
491 return self._repo.manifest.readdelta(self._changeset[0])
493 return self._repo.manifest.readdelta(self._changeset[0])
492
494
493 @propertycache
495 @propertycache
494 def _parents(self):
496 def _parents(self):
495 p = self._repo.changelog.parentrevs(self._rev)
497 p = self._repo.changelog.parentrevs(self._rev)
496 if p[1] == nullrev:
498 if p[1] == nullrev:
497 p = p[:-1]
499 p = p[:-1]
498 return [changectx(self._repo, x) for x in p]
500 return [changectx(self._repo, x) for x in p]
499
501
500 def changeset(self):
502 def changeset(self):
501 return self._changeset
503 return self._changeset
502 def manifestnode(self):
504 def manifestnode(self):
503 return self._changeset[0]
505 return self._changeset[0]
504
506
505 def user(self):
507 def user(self):
506 return self._changeset[1]
508 return self._changeset[1]
507 def date(self):
509 def date(self):
508 return self._changeset[2]
510 return self._changeset[2]
509 def files(self):
511 def files(self):
510 return self._changeset[3]
512 return self._changeset[3]
511 def description(self):
513 def description(self):
512 return self._changeset[4]
514 return self._changeset[4]
513 def branch(self):
515 def branch(self):
514 return encoding.tolocal(self._changeset[5].get("branch"))
516 return encoding.tolocal(self._changeset[5].get("branch"))
515 def closesbranch(self):
517 def closesbranch(self):
516 return 'close' in self._changeset[5]
518 return 'close' in self._changeset[5]
517 def extra(self):
519 def extra(self):
518 return self._changeset[5]
520 return self._changeset[5]
519 def tags(self):
521 def tags(self):
520 return self._repo.nodetags(self._node)
522 return self._repo.nodetags(self._node)
521 def bookmarks(self):
523 def bookmarks(self):
522 return self._repo.nodebookmarks(self._node)
524 return self._repo.nodebookmarks(self._node)
523 def phase(self):
525 def phase(self):
524 return self._repo._phasecache.phase(self._repo, self._rev)
526 return self._repo._phasecache.phase(self._repo, self._rev)
525 def hidden(self):
527 def hidden(self):
526 return self._rev in repoview.filterrevs(self._repo, 'visible')
528 return self._rev in repoview.filterrevs(self._repo, 'visible')
527
529
528 def children(self):
530 def children(self):
529 """return contexts for each child changeset"""
531 """return contexts for each child changeset"""
530 c = self._repo.changelog.children(self._node)
532 c = self._repo.changelog.children(self._node)
531 return [changectx(self._repo, x) for x in c]
533 return [changectx(self._repo, x) for x in c]
532
534
533 def ancestors(self):
535 def ancestors(self):
534 for a in self._repo.changelog.ancestors([self._rev]):
536 for a in self._repo.changelog.ancestors([self._rev]):
535 yield changectx(self._repo, a)
537 yield changectx(self._repo, a)
536
538
537 def descendants(self):
539 def descendants(self):
538 for d in self._repo.changelog.descendants([self._rev]):
540 for d in self._repo.changelog.descendants([self._rev]):
539 yield changectx(self._repo, d)
541 yield changectx(self._repo, d)
540
542
541 def filectx(self, path, fileid=None, filelog=None):
543 def filectx(self, path, fileid=None, filelog=None):
542 """get a file context from this changeset"""
544 """get a file context from this changeset"""
543 if fileid is None:
545 if fileid is None:
544 fileid = self.filenode(path)
546 fileid = self.filenode(path)
545 return filectx(self._repo, path, fileid=fileid,
547 return filectx(self._repo, path, fileid=fileid,
546 changectx=self, filelog=filelog)
548 changectx=self, filelog=filelog)
547
549
548 def ancestor(self, c2, warn=False):
550 def ancestor(self, c2, warn=False):
549 """return the "best" ancestor context of self and c2
551 """return the "best" ancestor context of self and c2
550
552
551 If there are multiple candidates, it will show a message and check
553 If there are multiple candidates, it will show a message and check
552 merge.preferancestor configuration before falling back to the
554 merge.preferancestor configuration before falling back to the
553 revlog ancestor."""
555 revlog ancestor."""
554 # deal with workingctxs
556 # deal with workingctxs
555 n2 = c2._node
557 n2 = c2._node
556 if n2 is None:
558 if n2 is None:
557 n2 = c2._parents[0]._node
559 n2 = c2._parents[0]._node
558 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
560 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
559 if not cahs:
561 if not cahs:
560 anc = nullid
562 anc = nullid
561 elif len(cahs) == 1:
563 elif len(cahs) == 1:
562 anc = cahs[0]
564 anc = cahs[0]
563 else:
565 else:
564 for r in self._repo.ui.configlist('merge', 'preferancestor'):
566 for r in self._repo.ui.configlist('merge', 'preferancestor'):
565 try:
567 try:
566 ctx = changectx(self._repo, r)
568 ctx = changectx(self._repo, r)
567 except error.RepoLookupError:
569 except error.RepoLookupError:
568 continue
570 continue
569 anc = ctx.node()
571 anc = ctx.node()
570 if anc in cahs:
572 if anc in cahs:
571 break
573 break
572 else:
574 else:
573 anc = self._repo.changelog.ancestor(self._node, n2)
575 anc = self._repo.changelog.ancestor(self._node, n2)
574 if warn:
576 if warn:
575 self._repo.ui.status(
577 self._repo.ui.status(
576 (_("note: using %s as ancestor of %s and %s\n") %
578 (_("note: using %s as ancestor of %s and %s\n") %
577 (short(anc), short(self._node), short(n2))) +
579 (short(anc), short(self._node), short(n2))) +
578 ''.join(_(" alternatively, use --config "
580 ''.join(_(" alternatively, use --config "
579 "merge.preferancestor=%s\n") %
581 "merge.preferancestor=%s\n") %
580 short(n) for n in sorted(cahs) if n != anc))
582 short(n) for n in sorted(cahs) if n != anc))
581 return changectx(self._repo, anc)
583 return changectx(self._repo, anc)
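
When the two changesets share several common-ancestor heads, the loop above walks the merge.preferancestor configuration in order and keeps the first configured revision that is actually one of those heads, falling back to the plain revlog ancestor (and printing the note) otherwise. A toy version of that selection on plain lists (pick_ancestor is invented for this sketch):

def pick_ancestor(cahs, preferred):
    """Toy version of the selection loop in ancestor() above.

    cahs is the list of common-ancestor heads, preferred the parsed value
    of the merge.preferancestor configuration."""
    if not cahs:
        return None                      # stands in for nullid
    if len(cahs) == 1:
        return cahs[0]
    for r in preferred:
        if r in cahs:                    # first configured head wins
            return r
    return cahs[0]                       # stands in for the revlog ancestor

print pick_ancestor(['a1', 'b2', 'c3'], preferred=['x9', 'b2'])   # -> b2
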
582
584
583 def descendant(self, other):
585 def descendant(self, other):
584 """True if other is descendant of this changeset"""
586 """True if other is descendant of this changeset"""
585 return self._repo.changelog.descendant(self._rev, other._rev)
587 return self._repo.changelog.descendant(self._rev, other._rev)
586
588
587 def walk(self, match):
589 def walk(self, match):
588 fset = set(match.files())
590 fset = set(match.files())
589 # for dirstate.walk, files=['.'] means "walk the whole tree".
591 # for dirstate.walk, files=['.'] means "walk the whole tree".
590 # follow that here, too
592 # follow that here, too
591 fset.discard('.')
593 fset.discard('.')
592
594
593 # avoid the entire walk if we're only looking for specific files
595 # avoid the entire walk if we're only looking for specific files
594 if fset and not match.anypats():
596 if fset and not match.anypats():
595 if util.all([fn in self for fn in fset]):
597 if util.all([fn in self for fn in fset]):
596 for fn in sorted(fset):
598 for fn in sorted(fset):
597 if match(fn):
599 if match(fn):
598 yield fn
600 yield fn
599 raise StopIteration
601 raise StopIteration
600
602
601 for fn in self:
603 for fn in self:
602 if fn in fset:
604 if fn in fset:
603 # specified pattern is the exact name
605 # specified pattern is the exact name
604 fset.remove(fn)
606 fset.remove(fn)
605 if match(fn):
607 if match(fn):
606 yield fn
608 yield fn
607 for fn in sorted(fset):
609 for fn in sorted(fset):
608 if fn in self._dirs:
610 if fn in self._dirs:
609 # specified pattern is a directory
611 # specified pattern is a directory
610 continue
612 continue
611 match.bad(fn, _('no such file in rev %s') % self)
613 match.bad(fn, _('no such file in rev %s') % self)
612
614
613 def matches(self, match):
615 def matches(self, match):
614 return self.walk(match)
616 return self.walk(match)
615
617
616 class basefilectx(object):
618 class basefilectx(object):
617 """A filecontext object represents the common logic for its children:
619 """A filecontext object represents the common logic for its children:
618 filectx: read-only access to a filerevision that is already present
620 filectx: read-only access to a filerevision that is already present
619 in the repo,
621 in the repo,
620 workingfilectx: a filecontext that represents files from the working
622 workingfilectx: a filecontext that represents files from the working
621 directory,
623 directory,
622 memfilectx: a filecontext that represents files in-memory."""
624 memfilectx: a filecontext that represents files in-memory."""
623 def __new__(cls, repo, path, *args, **kwargs):
625 def __new__(cls, repo, path, *args, **kwargs):
624 return super(basefilectx, cls).__new__(cls)
626 return super(basefilectx, cls).__new__(cls)
625
627
626 @propertycache
628 @propertycache
627 def _filelog(self):
629 def _filelog(self):
628 return self._repo.file(self._path)
630 return self._repo.file(self._path)
629
631
630 @propertycache
632 @propertycache
631 def _changeid(self):
633 def _changeid(self):
632 if '_changeid' in self.__dict__:
634 if '_changeid' in self.__dict__:
633 return self._changeid
635 return self._changeid
634 elif '_changectx' in self.__dict__:
636 elif '_changectx' in self.__dict__:
635 return self._changectx.rev()
637 return self._changectx.rev()
636 elif '_descendantrev' in self.__dict__:
638 elif '_descendantrev' in self.__dict__:
637 # this file context was created from a revision with a known
639 # this file context was created from a revision with a known
638 # descendant, we can (lazily) correct for linkrev aliases
640 # descendant, we can (lazily) correct for linkrev aliases
639 return self._adjustlinkrev(self._path, self._filelog,
641 return self._adjustlinkrev(self._path, self._filelog,
640 self._filenode, self._descendantrev)
642 self._filenode, self._descendantrev)
641 else:
643 else:
642 return self._filelog.linkrev(self._filerev)
644 return self._filelog.linkrev(self._filerev)
643
645
644 @propertycache
646 @propertycache
645 def _filenode(self):
647 def _filenode(self):
646 if '_fileid' in self.__dict__:
648 if '_fileid' in self.__dict__:
647 return self._filelog.lookup(self._fileid)
649 return self._filelog.lookup(self._fileid)
648 else:
650 else:
649 return self._changectx.filenode(self._path)
651 return self._changectx.filenode(self._path)
650
652
651 @propertycache
653 @propertycache
652 def _filerev(self):
654 def _filerev(self):
653 return self._filelog.rev(self._filenode)
655 return self._filelog.rev(self._filenode)
654
656
655 @propertycache
657 @propertycache
656 def _repopath(self):
658 def _repopath(self):
657 return self._path
659 return self._path
658
660
659 def __nonzero__(self):
661 def __nonzero__(self):
660 try:
662 try:
661 self._filenode
663 self._filenode
662 return True
664 return True
663 except error.LookupError:
665 except error.LookupError:
664 # file is missing
666 # file is missing
665 return False
667 return False
666
668
667 def __str__(self):
669 def __str__(self):
668 return "%s@%s" % (self.path(), self._changectx)
670 return "%s@%s" % (self.path(), self._changectx)
669
671
670 def __repr__(self):
672 def __repr__(self):
671 return "<%s %s>" % (type(self).__name__, str(self))
673 return "<%s %s>" % (type(self).__name__, str(self))
672
674
673 def __hash__(self):
675 def __hash__(self):
674 try:
676 try:
675 return hash((self._path, self._filenode))
677 return hash((self._path, self._filenode))
676 except AttributeError:
678 except AttributeError:
677 return id(self)
679 return id(self)
678
680
679 def __eq__(self, other):
681 def __eq__(self, other):
680 try:
682 try:
681 return (type(self) == type(other) and self._path == other._path
683 return (type(self) == type(other) and self._path == other._path
682 and self._filenode == other._filenode)
684 and self._filenode == other._filenode)
683 except AttributeError:
685 except AttributeError:
684 return False
686 return False
685
687
686 def __ne__(self, other):
688 def __ne__(self, other):
687 return not (self == other)
689 return not (self == other)
688
690
689 def filerev(self):
691 def filerev(self):
690 return self._filerev
692 return self._filerev
691 def filenode(self):
693 def filenode(self):
692 return self._filenode
694 return self._filenode
693 def flags(self):
695 def flags(self):
694 return self._changectx.flags(self._path)
696 return self._changectx.flags(self._path)
695 def filelog(self):
697 def filelog(self):
696 return self._filelog
698 return self._filelog
697 def rev(self):
699 def rev(self):
698 return self._changeid
700 return self._changeid
699 def linkrev(self):
701 def linkrev(self):
700 return self._filelog.linkrev(self._filerev)
702 return self._filelog.linkrev(self._filerev)
701 def node(self):
703 def node(self):
702 return self._changectx.node()
704 return self._changectx.node()
703 def hex(self):
705 def hex(self):
704 return self._changectx.hex()
706 return self._changectx.hex()
705 def user(self):
707 def user(self):
706 return self._changectx.user()
708 return self._changectx.user()
707 def date(self):
709 def date(self):
708 return self._changectx.date()
710 return self._changectx.date()
709 def files(self):
711 def files(self):
710 return self._changectx.files()
712 return self._changectx.files()
711 def description(self):
713 def description(self):
712 return self._changectx.description()
714 return self._changectx.description()
713 def branch(self):
715 def branch(self):
714 return self._changectx.branch()
716 return self._changectx.branch()
715 def extra(self):
717 def extra(self):
716 return self._changectx.extra()
718 return self._changectx.extra()
717 def phase(self):
719 def phase(self):
718 return self._changectx.phase()
720 return self._changectx.phase()
719 def phasestr(self):
721 def phasestr(self):
720 return self._changectx.phasestr()
722 return self._changectx.phasestr()
721 def manifest(self):
723 def manifest(self):
722 return self._changectx.manifest()
724 return self._changectx.manifest()
723 def changectx(self):
725 def changectx(self):
724 return self._changectx
726 return self._changectx
725
727
726 def path(self):
728 def path(self):
727 return self._path
729 return self._path
728
730
729 def isbinary(self):
731 def isbinary(self):
730 try:
732 try:
731 return util.binary(self.data())
733 return util.binary(self.data())
732 except IOError:
734 except IOError:
733 return False
735 return False
734 def isexec(self):
736 def isexec(self):
735 return 'x' in self.flags()
737 return 'x' in self.flags()
736 def islink(self):
738 def islink(self):
737 return 'l' in self.flags()
739 return 'l' in self.flags()
738
740
739 def cmp(self, fctx):
741 def cmp(self, fctx):
740 """compare with other file context
742 """compare with other file context
741
743
742 returns True if different than fctx.
744 returns True if different than fctx.
743 """
745 """
744 if (fctx._filerev is None
746 if (fctx._filerev is None
745 and (self._repo._encodefilterpats
747 and (self._repo._encodefilterpats
746 # if file data starts with '\1\n', empty metadata block is
748 # if file data starts with '\1\n', empty metadata block is
747 # prepended, which adds 4 bytes to filelog.size().
749 # prepended, which adds 4 bytes to filelog.size().
748 or self.size() - 4 == fctx.size())
750 or self.size() - 4 == fctx.size())
749 or self.size() == fctx.size()):
751 or self.size() == fctx.size()):
750 return self._filelog.cmp(self._filenode, fctx.data())
752 return self._filelog.cmp(self._filenode, fctx.data())
751
753
752 return True
754 return True
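
The size arithmetic in cmp() leans on filelog framing: if the file's own data happens to begin with '\1\n', the filelog prepends an empty metadata block, so the stored size can legitimately exceed the working-copy size by exactly 4 bytes. A standalone illustration of that framing (the frame helper only mimics the behaviour described in the comment above; it is not the revlog code):

def frame(data):
    """Mimic the metadata framing described in the comment above."""
    if data.startswith('\1\n'):
        return '\1\n\1\n' + data        # empty metadata block prepended
    return data

raw = '\1\nsome file content\n'
stored = frame(raw)
print len(stored) - len(raw)            # -> 4, the slack cmp() allows for
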
753
755
754 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
756 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
755 """return the first ancestor of <srcrev> introducing <fnode>
757 """return the first ancestor of <srcrev> introducing <fnode>
756
758
757 If the linkrev of the file revision does not point to an ancestor of
759 If the linkrev of the file revision does not point to an ancestor of
758 srcrev, we'll walk down the ancestors until we find one introducing
760 srcrev, we'll walk down the ancestors until we find one introducing
759 this file revision.
761 this file revision.
760
762
761 :repo: a localrepository object (used to access changelog and manifest)
763 :repo: a localrepository object (used to access changelog and manifest)
762 :path: the file path
764 :path: the file path
763 :fnode: the nodeid of the file revision
765 :fnode: the nodeid of the file revision
764 :filelog: the filelog of this path
766 :filelog: the filelog of this path
765 :srcrev: the changeset revision we search ancestors from
767 :srcrev: the changeset revision we search ancestors from
766 :inclusive: if true, the src revision will also be checked
768 :inclusive: if true, the src revision will also be checked
767 """
769 """
768 repo = self._repo
770 repo = self._repo
769 cl = repo.unfiltered().changelog
771 cl = repo.unfiltered().changelog
770 ma = repo.manifest
772 ma = repo.manifest
771 # fetch the linkrev
773 # fetch the linkrev
772 fr = filelog.rev(fnode)
774 fr = filelog.rev(fnode)
773 lkr = filelog.linkrev(fr)
775 lkr = filelog.linkrev(fr)
774 # hack to reuse ancestor computation when searching for renames
776 # hack to reuse ancestor computation when searching for renames
775 memberanc = getattr(self, '_ancestrycontext', None)
777 memberanc = getattr(self, '_ancestrycontext', None)
776 iteranc = None
778 iteranc = None
777 if memberanc is None:
779 if memberanc is None:
778 memberanc = iteranc = cl.ancestors([srcrev], lkr,
780 memberanc = iteranc = cl.ancestors([srcrev], lkr,
779 inclusive=inclusive)
781 inclusive=inclusive)
780 # check if this linkrev is an ancestor of srcrev
782 # check if this linkrev is an ancestor of srcrev
781 if lkr not in memberanc:
783 if lkr not in memberanc:
782 if iteranc is None:
784 if iteranc is None:
783 iteranc = cl.ancestors([srcrev], lkr, inclusive=inclusive)
785 iteranc = cl.ancestors([srcrev], lkr, inclusive=inclusive)
784 for a in iteranc:
786 for a in iteranc:
785 ac = cl.read(a) # get changeset data (we avoid object creation)
787 ac = cl.read(a) # get changeset data (we avoid object creation)
786 if path in ac[3]: # checking the 'files' field.
788 if path in ac[3]: # checking the 'files' field.
787 # The file has been touched, check if the content is
789 # The file has been touched, check if the content is
788 # similar to the one we search for.
790 # similar to the one we search for.
789 if fnode == ma.readfast(ac[0]).get(path):
791 if fnode == ma.readfast(ac[0]).get(path):
790 return a
792 return a
791 # In theory, we should never get out of that loop without a result.
793 # In theory, we should never get out of that loop without a result.
792 # But if the manifest uses a buggy file revision (not a child of the
794 # But if the manifest uses a buggy file revision (not a child of the
793 # one it replaces) we could. Such a buggy situation will likely
795 # one it replaces) we could. Such a buggy situation will likely
794 # result in a crash somewhere else at some point.
796 # result in a crash somewhere else at some point.
795 return lkr
797 return lkr
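
A filelog revision carries a single linkrev even when several changesets introduced the same content, so that linkrev may point at a changeset that is not an ancestor of the one we started from; _adjustlinkrev therefore walks the ancestors of srcrev looking for one whose 'files' list names the path and whose manifest records the same file node. A toy rendering of that search over plain data structures (adjustlinkrev here is a simplified stand-in, not the changelog API):

def adjustlinkrev(path, fnode, linkrev, ancestors, changesets):
    """Toy version of the ancestor walk in _adjustlinkrev above.

    ancestors is the ancestor set of the source revision; changesets maps
    rev -> (files-touched, {path: filenode}) as a stand-in for the
    changelog entry and its manifest."""
    if linkrev in ancestors:
        return linkrev                    # the easy, common case
    for rev in sorted(ancestors, reverse=True):
        files, manifest = changesets[rev]
        if path in files and manifest.get(path) == fnode:
            return rev                    # first ancestor introducing fnode
    return linkrev                        # buggy-history fallback

changesets = {
    1: (['f'], {'f': 'n0'}),
    2: (['f'], {'f': 'n0'}),              # reintroduces the same content
}
print adjustlinkrev('f', 'n0', 3, ancestors={1, 2}, changesets=changesets)  # -> 2
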
796
798
797 def introrev(self):
799 def introrev(self):
798 """return the rev of the changeset which introduced this file revision
800 """return the rev of the changeset which introduced this file revision
799
801
800 This method is different from linkrev because it takes into account the
802 This method is different from linkrev because it takes into account the
801 changeset the filectx was created from. It ensures the returned
803 changeset the filectx was created from. It ensures the returned
802 revision is one of its ancestors. This prevents bugs from
804 revision is one of its ancestors. This prevents bugs from
803 'linkrev-shadowing' when a file revision is used by multiple
805 'linkrev-shadowing' when a file revision is used by multiple
804 changesets.
806 changesets.
805 """
807 """
806 lkr = self.linkrev()
808 lkr = self.linkrev()
807 attrs = vars(self)
809 attrs = vars(self)
808 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
810 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
809 if noctx or self.rev() == lkr:
811 if noctx or self.rev() == lkr:
810 return self.linkrev()
812 return self.linkrev()
811 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
813 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
812 self.rev(), inclusive=True)
814 self.rev(), inclusive=True)
813
815
814 def parents(self):
816 def parents(self):
815 _path = self._path
817 _path = self._path
816 fl = self._filelog
818 fl = self._filelog
817 parents = self._filelog.parents(self._filenode)
819 parents = self._filelog.parents(self._filenode)
818 pl = [(_path, node, fl) for node in parents if node != nullid]
820 pl = [(_path, node, fl) for node in parents if node != nullid]
819
821
820 r = fl.renamed(self._filenode)
822 r = fl.renamed(self._filenode)
821 if r:
823 if r:
822 # - In the simple rename case, both parents are nullid, pl is empty.
824 # - In the simple rename case, both parents are nullid, pl is empty.
823 # - In case of merge, only one of the parents is nullid and should
825 # - In case of merge, only one of the parents is nullid and should
824 # be replaced with the rename information. This parent is -always-
826 # be replaced with the rename information. This parent is -always-
825 # the first one.
827 # the first one.
826 #
828 #
827 # As nullid has always been filtered out in the previous list
829 # As nullid has always been filtered out in the previous list
828 # comprehension, inserting at 0 will always result in replacing the
830 # comprehension, inserting at 0 will always result in replacing the
829 # first nullid parent with the rename information.
831 # first nullid parent with the rename information.
830 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
832 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
831
833
832 ret = []
834 ret = []
833 for path, fnode, l in pl:
835 for path, fnode, l in pl:
834 if '_changeid' in vars(self) or '_changectx' in vars(self):
836 if '_changeid' in vars(self) or '_changectx' in vars(self):
835 # If self is associated with a changeset (probably explicitly
837 # If self is associated with a changeset (probably explicitly
836 # fed), ensure the created filectx is associated with a
838 # fed), ensure the created filectx is associated with a
837 # changeset that is an ancestor of self.changectx.
839 # changeset that is an ancestor of self.changectx.
838 # This lets us later use _adjustlinkrev to get a correct link.
840 # This lets us later use _adjustlinkrev to get a correct link.
839 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
841 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
840 fctx._descendantrev = self.rev()
842 fctx._descendantrev = self.rev()
841 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
843 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
842 elif '_descendantrev' in vars(self):
844 elif '_descendantrev' in vars(self):
843 # Otherwise propagate _descendantrev if we have one associated.
845 # Otherwise propagate _descendantrev if we have one associated.
844 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
846 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
845 fctx._descendantrev = self._descendantrev
847 fctx._descendantrev = self._descendantrev
846 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
848 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
847 else:
849 else:
848 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
850 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
849 ret.append(fctx)
851 ret.append(fctx)
850 return ret
852 return ret
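
As the comment block above explains, a rename leaves either two nullid filelog parents (plain rename) or exactly one (rename recorded on a merge); since nullid parents are filtered out of pl, inserting the copy source at index 0 always takes the place of the missing first parent. A toy sketch of that list manipulation (plain tuples instead of filectx objects; parentlist is invented for illustration):

nullid = '\0' * 20

def parentlist(filelog_parents, rename):
    """Toy version of the parent computation in parents() above."""
    pl = [p for p in filelog_parents if p != nullid]
    if rename is not None:
        # rename is (source-path, source-filenode); it always takes the
        # place of the filtered-out first nullid parent.
        pl.insert(0, rename)
    return pl

# Simple rename: both filelog parents are nullid, only the source remains.
print parentlist((nullid, nullid), ('old/name.py', 'srcnode'))
# Rename recorded on a merge: one real parent plus the rename source first.
print parentlist(('othernode', nullid), ('old/name.py', 'srcnode'))
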
851
853
852 def p1(self):
854 def p1(self):
853 return self.parents()[0]
855 return self.parents()[0]
854
856
855 def p2(self):
857 def p2(self):
856 p = self.parents()
858 p = self.parents()
857 if len(p) == 2:
859 if len(p) == 2:
858 return p[1]
860 return p[1]
859 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
861 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
860
862
861 def annotate(self, follow=False, linenumber=None, diffopts=None):
863 def annotate(self, follow=False, linenumber=None, diffopts=None):
862 '''returns a list of tuples of (ctx, line) for each line
864 '''returns a list of tuples of (ctx, line) for each line
863 in the file, where ctx is the filectx of the node where
865 in the file, where ctx is the filectx of the node where
864 that line was last changed.
866 that line was last changed.
865 This returns tuples of ((ctx, linenumber), line) for each line,
867 This returns tuples of ((ctx, linenumber), line) for each line,
866 if the "linenumber" parameter is not "None".
868 if the "linenumber" parameter is not "None".
867 In such tuples, linenumber is the line's number at its first
869 In such tuples, linenumber is the line's number at its first
868 appearance in the managed file.
870 appearance in the managed file.
869 To reduce annotation cost,
871 To reduce annotation cost,
870 this returns a fixed value (False) as the linenumber
872 this returns a fixed value (False) as the linenumber
871 if the "linenumber" parameter is "False".'''
873 if the "linenumber" parameter is "False".'''
872
874
        if linenumber is None:
            def decorate(text, rev):
                return ([rev] * len(text.splitlines()), text)
        elif linenumber:
            def decorate(text, rev):
                size = len(text.splitlines())
                return ([(rev, i) for i in xrange(1, size + 1)], text)
        else:
            def decorate(text, rev):
                return ([(rev, False)] * len(text.splitlines()), text)

        def pair(parent, child):
            blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
                                     refine=True)
            for (a1, a2, b1, b2), t in blocks:
                # Changed blocks ('!') or blocks made only of blank lines ('~')
                # belong to the child.
                if t == '=':
                    child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = self.filectx(self.filenode(), changeid=introrev)

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        visit = [base]
        hist = {}
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit[-1]
            pcached = f in pcache
            if not pcached:
                pcache[f] = parents(f)

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
                if not pcached:
                    needed[p] = needed.get(p, 0) + 1
            if ready:
                visit.pop()
                reusable = f in hist
                if reusable:
                    curr = hist[f]
                else:
                    curr = decorate(f.data(), f)
                for p in pl:
                    if not reusable:
                        curr = pair(hist[p], curr)
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                pcache[f] = []

        return zip(hist[base][0], hist[base][1].splitlines(True))

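    # A minimal usage sketch for annotate() (hypothetical 'repo' object and
    # file name), showing how the returned (ctx, line) pairs are typically
    # consumed; with linenumber=True each key would be a (ctx, lineno) tuple
    # instead:
    #
    #   fctx = repo['tip']['README']
    #   for ctx, line in fctx.annotate(follow=True):
    #       print "%d: %s" % (ctx.rev(), line),
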
    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        cut = followfirst and 1 or None
        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

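# A brief usage sketch (hypothetical 'fctx' filectx), walking a file's
# revision history via the ancestors() generator defined above:
#
#   for a in fctx.ancestors(followfirst=True):
#       print a.rev(), short(a.filenode())
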
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

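    # A minimal construction sketch (hypothetical 'repo' and path). Most
    # callers obtain a filectx through a changectx instead, e.g.
    # repo['tip']['README'], which supplies the changectx and filelog:
    #
    #   fctx = filectx(repo, 'README', changeid='tip')
    #   data = fctx.data()
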
    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build `changectx` for a filtered revision. In such a case we
            # fall back to creating `changectx` on the unfiltered version of
            # the repository. This fallback should not be an issue because
            # `changectx` from `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However
            # the behavior was not correct before filtering either and
            # "incorrect behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when solving the linkrev issues is on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise util.Abort(_("censored node: %s") % short(self._filenode),
                             hint=_("set censor.policy to ignore errors"))

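    # Behavior sketch for the censored-node handling above: with the
    # following user configuration the CensoredNodeError is swallowed and
    # empty file data is returned instead of aborting:
    #
    #   [censor]
    #   policy = ignore
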
    def size(self):
        return self._filelog.size(self._filerev)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report the copy for
        the changeset only if the file revision's linkrev points back to the
        changeset in question or both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

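    # A small sketch of consuming renamed() (hypothetical 'fctx'): the
    # method returns a falsy value or a (source path, source filenode) pair:
    #
    #   r = fctx.renamed()
    #   if r:
    #       srcpath, srcnode = r
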
    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get

        if len(self._parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = self._parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = self._parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

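    # Worked example for the merge branch of func() above (hypothetical
    # flag values):
    #
    #   fl1, fl2, fla = 'x', '', ''   # p1 made the file executable
    #   # fl2 == fla, so the p1 flag wins and 'x' is returned
    #   fl1, fl2, fla = 'l', 'x', ''  # real three-way disagreement
    #   # no pair matches, so '' is returned (punt for conflicts)
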
    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeid from the parent, but we append an extra
        letter when the file is modified. Modified files get an extra 'm'
        while added files get an extra 'a'. This is used by the manifest
        merge to see that files are different and by the update logic to
        avoid deleting newly added files.
        """

        man1 = self._parents[0].manifest()
        man = man1.copy()
        if len(self._parents) > 1:
            man2 = self.p2().manifest()
            def getman(f):
                if f in man1:
                    return man1
                return man2
        else:
            getman = lambda f: man1

        copied = self._repo.dirstate.copies()
        ff = self._flagfunc
        for i, l in (("a", self._status.added), ("m", self._status.modified)):
            for f in l:
                orig = copied.get(f, f)
                man[f] = getman(orig).get(orig, nullid) + i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in self._status.deleted + self._status.removed:
            if f in man:
                del man[f]

        return man

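    # Illustration of the node-suffix convention used by _manifest above
    # (hypothetical file names): a modified file keeps its parent nodeid
    # plus 'm', while an added file gets the null id plus 'a':
    #
    #   man['modified.txt'] == man1['modified.txt'] + 'm'
    #   man['added.txt'] == nullid + 'a'
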
    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    def subrev(self, subpath):
        return None

    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        t = []
        for p in self.parents():
            t.extend(p.tags())
        return t

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        self._repo.dirstate.beginparentchange()
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)
        self._repo.dirstate.endparentchange()

    def dirs(self):
        return self._repo.dirstate.dirs()

class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty():
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

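    # A short usage sketch (hypothetical 'repo'): repo[None] yields the
    # workingctx, so callers typically check for local changes with:
    #
    #   wctx = repo[None]
    #   if wctx.dirty(missing=True):
    #       raise util.Abort(_('uncommitted changes'))
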
    def add(self, list, prefix=""):
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        ui, ds = self._repo.ui, self._repo.dirstate
        try:
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, files, prefix=""):
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        try:
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected
        finally:
            wlock.release()

    def undelete(self, list):
        pctxs = self.parents()
        wlock = self._repo.wlock()
        try:
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % f)
                else:
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n") % dest)
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n") % dest)
        else:
            wlock = self._repo.wlock()
            try:
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], []

        modified = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)

        # update dirstate for files that are actually clean
        if fixup:
            try:
                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                wlock = self._repo.wlock(False)
                normal = self._repo.dirstate.normal
                try:
                    for f in fixup:
                        normal(f)
                finally:
                    wlock.release()
            except error.LockError:
                pass
        return modified, fixup

    def _manifestmatches(self, match, s):
        """Slow path for workingctx

        The fast path is when we compare the working directory to its parent
        which means this function is comparing with a non-parent; therefore we
        need to build a manifest and return what matches.
        """
        mf = self._repo['.']._manifestmatches(match, s)
        for f in s.modified + s.added:
            mf[f] = _newnode
            mf.setflag(f, self.flags(f))
        for f in s.removed:
            if f in mf:
                del mf[f]
        return mf

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        if cmp:
            modified2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)

            # update dirstate for files that are actually clean
            if fixup and listclean:
                s.clean.extend(fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, match)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and f not in other.dirs():
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []

class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)

class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while
    related file data is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revision identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    the requested file, relative to the repository root. It is fired by
    the commit function for every file in 'files', but the call order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to the current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to the current date, extra
    is a dictionary of metadata or is left empty.
    """

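    # A minimal usage sketch (hypothetical repo, parents and file content),
    # committing a single file purely in memory:
    #
    #   def filectxfn(repo, mctx, path):
    #       return memfilectx(repo, path, 'new content\n')
    #
    #   ctx = memctx(repo, (repo['tip'].node(), None), 'commit message',
    #                ['a.txt'], filectxfn, user='alice')
    #   newnode = ctx.commit()
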
1723 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1725 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1724 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1726 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1725 # this field to determine what to do in filectxfn.
1727 # this field to determine what to do in filectxfn.
1726 _returnnoneformissingfiles = True
1728 _returnnoneformissingfiles = True
1727
1729
1728 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1730 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1729 date=None, extra=None, editor=False):
1731 date=None, extra=None, editor=False):
1730 super(memctx, self).__init__(repo, text, user, date, extra)
1732 super(memctx, self).__init__(repo, text, user, date, extra)
1731 self._rev = None
1733 self._rev = None
1732 self._node = None
1734 self._node = None
1733 parents = [(p or nullid) for p in parents]
1735 parents = [(p or nullid) for p in parents]
1734 p1, p2 = parents
1736 p1, p2 = parents
1735 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1737 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1736 files = sorted(set(files))
1738 files = sorted(set(files))
1737 self._files = files
1739 self._files = files
1738 self.substate = {}
1740 self.substate = {}
1739
1741
1740 # if store is not callable, wrap it in a function
1742 # if store is not callable, wrap it in a function
1741 if not callable(filectxfn):
1743 if not callable(filectxfn):
1742 def getfilectx(repo, memctx, path):
1744 def getfilectx(repo, memctx, path):
1743 fctx = filectxfn[path]
1745 fctx = filectxfn[path]
1744 # this is weird but apparently we only keep track of one parent
1746 # this is weird but apparently we only keep track of one parent
1745 # (why not only store that instead of a tuple?)
1747 # (why not only store that instead of a tuple?)
1746 copied = fctx.renamed()
1748 copied = fctx.renamed()
1747 if copied:
1749 if copied:
1748 copied = copied[0]
1750 copied = copied[0]
1749 return memfilectx(repo, path, fctx.data(),
1751 return memfilectx(repo, path, fctx.data(),
1750 islink=fctx.islink(), isexec=fctx.isexec(),
1752 islink=fctx.islink(), isexec=fctx.isexec(),
1751 copied=copied, memctx=memctx)
1753 copied=copied, memctx=memctx)
1752 self._filectxfn = getfilectx
1754 self._filectxfn = getfilectx
1753 else:
1755 else:
1754 # "util.cachefunc" reduces invocation of possibly expensive
1756 # "util.cachefunc" reduces invocation of possibly expensive
1755 # "filectxfn" for performance (e.g. converting from another VCS)
1757 # "filectxfn" for performance (e.g. converting from another VCS)
1756 self._filectxfn = util.cachefunc(filectxfn)
1758 self._filectxfn = util.cachefunc(filectxfn)

        self._extra = extra and extra.copy() or {}
        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].node()
                if len(p) > 1:
                    p2node = p[1].node()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

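    # Hedged aside on "revlog.hash(text, p1, p2)" as used by _manifest above:
    # it is understood to be the standard nodeid computation -- sha1 over the
    # two parent nodes (lexicographically sorted) followed by the text -- so
    # the synthetic manifest records plausible nodeids even for files that
    # have no filelog entry yet. A rough equivalent, for illustration only:
    #
    #   import hashlib
    #   def _nodeid(text, p1, p2):
    #       s = hashlib.sha1(min(p1, p2))
    #       s.update(max(p1, p2))
    #       s.update(text)
    #       return s.digest()
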
    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used to check for the
        # existence of the 2nd parent, because "memctx._parents" is
        # always initialized with a list of length 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
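
    # An illustrative (hypothetical) walk-through of the classification above,
    # using the "return None to remove" convention documented in filectx():
    # given files=['newfile', 'oldfile', 'gonefile'],
    #   newfile  -> added    (present in neither parent manifest)
    #   oldfile  -> modified (in a parent; filectxfn returned a memfilectx)
    #   gonefile -> removed  (in a parent; filectxfn returned None)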

class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data
    def size(self):
        return len(self.data())
    def flags(self):
        return self._flags
    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
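
# A minimal, hypothetical sketch of how memctx and memfilectx are meant to be
# used together (names such as "somerepo", "p1node", "p2node" and "newdata"
# are placeholders, not part of this module), assuming the
# memctx(repo, parents, text, files, filectxfn, ...) signature this class has
# historically taken:
#
#   def filectxfn(repo, memctx, path):
#       if path == 'gonefile':
#           return None                       # ask for the file to be removed
#       return memfilectx(repo, path, newdata[path], memctx=memctx)
#
#   ctx = memctx(somerepo, (p1node, p2node), "commit message",
#                ['newfile', 'oldfile', 'gonefile'], filectxfn)
#   newnode = somerepo.commitctx(ctx)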