context: add a repo accessor...
Matt Harbison
r24300:a0731447 default
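
This revision adds a repo() accessor to basectx, so changeset and file contexts can hand back the repository they belong to without callers reaching into the private _repo attribute. A minimal sketch of how calling code might use the new accessor (the `repo` variable below stands for any localrepository object and is purely illustrative, not part of this change):

    ctx = repo['.']             # changectx for the working directory's parent
    assert ctx.repo() is repo   # the new accessor returns the owning repository
    # before this change, callers had to touch the private attribute instead:
    # assert ctx._repo is repo
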
@@ -1,1864 +1,1866 @@
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, short, hex, bin
8 from node import nullid, nullrev, short, hex, bin
9 from i18n import _
9 from i18n import _
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 import match as matchmod
11 import match as matchmod
12 import os, errno, stat
12 import os, errno, stat
13 import obsolete as obsmod
13 import obsolete as obsmod
14 import repoview
14 import repoview
15 import fileset
15 import fileset
16 import revlog
16 import revlog
17
17
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19
19
20 # Phony node value to stand-in for new files in some uses of
20 # Phony node value to stand-in for new files in some uses of
21 # manifests. Manifests support 21-byte hashes for nodes which are
21 # manifests. Manifests support 21-byte hashes for nodes which are
22 # dirty in the working copy.
22 # dirty in the working copy.
23 _newnode = '!' * 21
23 _newnode = '!' * 21
24
24
25 class basectx(object):
25 class basectx(object):
26 """A basectx object represents the common logic for its children:
26 """A basectx object represents the common logic for its children:
27 changectx: read-only context that is already present in the repo,
27 changectx: read-only context that is already present in the repo,
28 workingctx: a context that represents the working directory and can
28 workingctx: a context that represents the working directory and can
29 be committed,
29 be committed,
30 memctx: a context that represents changes in-memory and can also
30 memctx: a context that represents changes in-memory and can also
31 be committed."""
31 be committed."""
32 def __new__(cls, repo, changeid='', *args, **kwargs):
32 def __new__(cls, repo, changeid='', *args, **kwargs):
33 if isinstance(changeid, basectx):
33 if isinstance(changeid, basectx):
34 return changeid
34 return changeid
35
35
36 o = super(basectx, cls).__new__(cls)
36 o = super(basectx, cls).__new__(cls)
37
37
38 o._repo = repo
38 o._repo = repo
39 o._rev = nullrev
39 o._rev = nullrev
40 o._node = nullid
40 o._node = nullid
41
41
42 return o
42 return o
43
43
44 def __str__(self):
44 def __str__(self):
45 return short(self.node())
45 return short(self.node())
46
46
47 def __int__(self):
47 def __int__(self):
48 return self.rev()
48 return self.rev()
49
49
50 def __repr__(self):
50 def __repr__(self):
51 return "<%s %s>" % (type(self).__name__, str(self))
51 return "<%s %s>" % (type(self).__name__, str(self))
52
52
53 def __eq__(self, other):
53 def __eq__(self, other):
54 try:
54 try:
55 return type(self) == type(other) and self._rev == other._rev
55 return type(self) == type(other) and self._rev == other._rev
56 except AttributeError:
56 except AttributeError:
57 return False
57 return False
58
58
59 def __ne__(self, other):
59 def __ne__(self, other):
60 return not (self == other)
60 return not (self == other)
61
61
62 def __contains__(self, key):
62 def __contains__(self, key):
63 return key in self._manifest
63 return key in self._manifest
64
64
65 def __getitem__(self, key):
65 def __getitem__(self, key):
66 return self.filectx(key)
66 return self.filectx(key)
67
67
68 def __iter__(self):
68 def __iter__(self):
69 return iter(self._manifest)
69 return iter(self._manifest)
70
70
71 def _manifestmatches(self, match, s):
71 def _manifestmatches(self, match, s):
72 """generate a new manifest filtered by the match argument
72 """generate a new manifest filtered by the match argument
73
73
74 This method is for internal use only and mainly exists to provide an
74 This method is for internal use only and mainly exists to provide an
75 object oriented way for other contexts to customize the manifest
75 object oriented way for other contexts to customize the manifest
76 generation.
76 generation.
77 """
77 """
78 return self.manifest().matches(match)
78 return self.manifest().matches(match)
79
79
80 def _matchstatus(self, other, match):
80 def _matchstatus(self, other, match):
81 """return match.always if match is none
81 """return match.always if match is none
82
82
83 This internal method provides a way for child objects to override the
83 This internal method provides a way for child objects to override the
84 match operator.
84 match operator.
85 """
85 """
86 return match or matchmod.always(self._repo.root, self._repo.getcwd())
86 return match or matchmod.always(self._repo.root, self._repo.getcwd())
87
87
88 def _buildstatus(self, other, s, match, listignored, listclean,
88 def _buildstatus(self, other, s, match, listignored, listclean,
89 listunknown):
89 listunknown):
90 """build a status with respect to another context"""
90 """build a status with respect to another context"""
91 # Load earliest manifest first for caching reasons. More specifically,
91 # Load earliest manifest first for caching reasons. More specifically,
92 # if you have revisions 1000 and 1001, 1001 is probably stored as a
92 # if you have revisions 1000 and 1001, 1001 is probably stored as a
93 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
93 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
94 # 1000 and cache it so that when you read 1001, we just need to apply a
94 # 1000 and cache it so that when you read 1001, we just need to apply a
95 # delta to what's in the cache. So that's one full reconstruction + one
95 # delta to what's in the cache. So that's one full reconstruction + one
96 # delta application.
96 # delta application.
97 if self.rev() is not None and self.rev() < other.rev():
97 if self.rev() is not None and self.rev() < other.rev():
98 self.manifest()
98 self.manifest()
99 mf1 = other._manifestmatches(match, s)
99 mf1 = other._manifestmatches(match, s)
100 mf2 = self._manifestmatches(match, s)
100 mf2 = self._manifestmatches(match, s)
101
101
102 modified, added = [], []
102 modified, added = [], []
103 removed = []
103 removed = []
104 clean = []
104 clean = []
105 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
105 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
106 deletedset = set(deleted)
106 deletedset = set(deleted)
107 d = mf1.diff(mf2, clean=listclean)
107 d = mf1.diff(mf2, clean=listclean)
108 for fn, value in d.iteritems():
108 for fn, value in d.iteritems():
109 if fn in deletedset:
109 if fn in deletedset:
110 continue
110 continue
111 if value is None:
111 if value is None:
112 clean.append(fn)
112 clean.append(fn)
113 continue
113 continue
114 (node1, flag1), (node2, flag2) = value
114 (node1, flag1), (node2, flag2) = value
115 if node1 is None:
115 if node1 is None:
116 added.append(fn)
116 added.append(fn)
117 elif node2 is None:
117 elif node2 is None:
118 removed.append(fn)
118 removed.append(fn)
119 elif node2 != _newnode:
119 elif node2 != _newnode:
120 # The file was not a new file in mf2, so an entry
120 # The file was not a new file in mf2, so an entry
121 # from diff is really a difference.
121 # from diff is really a difference.
122 modified.append(fn)
122 modified.append(fn)
123 elif self[fn].cmp(other[fn]):
123 elif self[fn].cmp(other[fn]):
124 # node2 was newnode, but the working file doesn't
124 # node2 was newnode, but the working file doesn't
125 # match the one in mf1.
125 # match the one in mf1.
126 modified.append(fn)
126 modified.append(fn)
127 else:
127 else:
128 clean.append(fn)
128 clean.append(fn)
129
129
130 if removed:
130 if removed:
131 # need to filter files if they are already reported as removed
131 # need to filter files if they are already reported as removed
132 unknown = [fn for fn in unknown if fn not in mf1]
132 unknown = [fn for fn in unknown if fn not in mf1]
133 ignored = [fn for fn in ignored if fn not in mf1]
133 ignored = [fn for fn in ignored if fn not in mf1]
134 # if they're deleted, don't report them as removed
134 # if they're deleted, don't report them as removed
135 removed = [fn for fn in removed if fn not in deletedset]
135 removed = [fn for fn in removed if fn not in deletedset]
136
136
137 return scmutil.status(modified, added, removed, deleted, unknown,
137 return scmutil.status(modified, added, removed, deleted, unknown,
138 ignored, clean)
138 ignored, clean)
139
139
140 @propertycache
140 @propertycache
141 def substate(self):
141 def substate(self):
142 return subrepo.state(self, self._repo.ui)
142 return subrepo.state(self, self._repo.ui)
143
143
144 def subrev(self, subpath):
144 def subrev(self, subpath):
145 return self.substate[subpath][1]
145 return self.substate[subpath][1]
146
146
147 def rev(self):
147 def rev(self):
148 return self._rev
148 return self._rev
149 def node(self):
149 def node(self):
150 return self._node
150 return self._node
151 def hex(self):
151 def hex(self):
152 return hex(self.node())
152 return hex(self.node())
153 def manifest(self):
153 def manifest(self):
154 return self._manifest
154 return self._manifest
155 def repo(self):
156 return self._repo
155 def phasestr(self):
157 def phasestr(self):
156 return phases.phasenames[self.phase()]
158 return phases.phasenames[self.phase()]
157 def mutable(self):
159 def mutable(self):
158 return self.phase() > phases.public
160 return self.phase() > phases.public
159
161
160 def getfileset(self, expr):
162 def getfileset(self, expr):
161 return fileset.getfileset(self, expr)
163 return fileset.getfileset(self, expr)
162
164
163 def obsolete(self):
165 def obsolete(self):
164 """True if the changeset is obsolete"""
166 """True if the changeset is obsolete"""
165 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
167 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
166
168
167 def extinct(self):
169 def extinct(self):
168 """True if the changeset is extinct"""
170 """True if the changeset is extinct"""
169 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
171 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
170
172
171 def unstable(self):
173 def unstable(self):
172 """True if the changeset is not obsolete but it's ancestor are"""
174 """True if the changeset is not obsolete but it's ancestor are"""
173 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
175 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
174
176
175 def bumped(self):
177 def bumped(self):
176 """True if the changeset try to be a successor of a public changeset
178 """True if the changeset try to be a successor of a public changeset
177
179
178 Only non-public and non-obsolete changesets may be bumped.
180 Only non-public and non-obsolete changesets may be bumped.
179 """
181 """
180 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
182 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
181
183
182 def divergent(self):
184 def divergent(self):
183 """Is a successors of a changeset with multiple possible successors set
185 """Is a successors of a changeset with multiple possible successors set
184
186
185 Only non-public and non-obsolete changesets may be divergent.
187 Only non-public and non-obsolete changesets may be divergent.
186 """
188 """
187 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
189 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
188
190
189 def troubled(self):
191 def troubled(self):
190 """True if the changeset is either unstable, bumped or divergent"""
192 """True if the changeset is either unstable, bumped or divergent"""
191 return self.unstable() or self.bumped() or self.divergent()
193 return self.unstable() or self.bumped() or self.divergent()
192
194
193 def troubles(self):
195 def troubles(self):
194 """return the list of troubles affecting this changesets.
196 """return the list of troubles affecting this changesets.
195
197
196 Troubles are returned as strings. possible values are:
198 Troubles are returned as strings. possible values are:
197 - unstable,
199 - unstable,
198 - bumped,
200 - bumped,
199 - divergent.
201 - divergent.
200 """
202 """
201 troubles = []
203 troubles = []
202 if self.unstable():
204 if self.unstable():
203 troubles.append('unstable')
205 troubles.append('unstable')
204 if self.bumped():
206 if self.bumped():
205 troubles.append('bumped')
207 troubles.append('bumped')
206 if self.divergent():
208 if self.divergent():
207 troubles.append('divergent')
209 troubles.append('divergent')
208 return troubles
210 return troubles
209
211
210 def parents(self):
212 def parents(self):
211 """return contexts for each parent changeset"""
213 """return contexts for each parent changeset"""
212 return self._parents
214 return self._parents
213
215
214 def p1(self):
216 def p1(self):
215 return self._parents[0]
217 return self._parents[0]
216
218
217 def p2(self):
219 def p2(self):
218 if len(self._parents) == 2:
220 if len(self._parents) == 2:
219 return self._parents[1]
221 return self._parents[1]
220 return changectx(self._repo, -1)
222 return changectx(self._repo, -1)
221
223
222 def _fileinfo(self, path):
224 def _fileinfo(self, path):
223 if '_manifest' in self.__dict__:
225 if '_manifest' in self.__dict__:
224 try:
226 try:
225 return self._manifest[path], self._manifest.flags(path)
227 return self._manifest[path], self._manifest.flags(path)
226 except KeyError:
228 except KeyError:
227 raise error.ManifestLookupError(self._node, path,
229 raise error.ManifestLookupError(self._node, path,
228 _('not found in manifest'))
230 _('not found in manifest'))
229 if '_manifestdelta' in self.__dict__ or path in self.files():
231 if '_manifestdelta' in self.__dict__ or path in self.files():
230 if path in self._manifestdelta:
232 if path in self._manifestdelta:
231 return (self._manifestdelta[path],
233 return (self._manifestdelta[path],
232 self._manifestdelta.flags(path))
234 self._manifestdelta.flags(path))
233 node, flag = self._repo.manifest.find(self._changeset[0], path)
235 node, flag = self._repo.manifest.find(self._changeset[0], path)
234 if not node:
236 if not node:
235 raise error.ManifestLookupError(self._node, path,
237 raise error.ManifestLookupError(self._node, path,
236 _('not found in manifest'))
238 _('not found in manifest'))
237
239
238 return node, flag
240 return node, flag
239
241
240 def filenode(self, path):
242 def filenode(self, path):
241 return self._fileinfo(path)[0]
243 return self._fileinfo(path)[0]
242
244
243 def flags(self, path):
245 def flags(self, path):
244 try:
246 try:
245 return self._fileinfo(path)[1]
247 return self._fileinfo(path)[1]
246 except error.LookupError:
248 except error.LookupError:
247 return ''
249 return ''
248
250
249 def sub(self, path):
251 def sub(self, path):
250 return subrepo.subrepo(self, path)
252 return subrepo.subrepo(self, path)
251
253
252 def match(self, pats=[], include=None, exclude=None, default='glob'):
254 def match(self, pats=[], include=None, exclude=None, default='glob'):
253 r = self._repo
255 r = self._repo
254 return matchmod.match(r.root, r.getcwd(), pats,
256 return matchmod.match(r.root, r.getcwd(), pats,
255 include, exclude, default,
257 include, exclude, default,
256 auditor=r.auditor, ctx=self)
258 auditor=r.auditor, ctx=self)
257
259
258 def diff(self, ctx2=None, match=None, **opts):
260 def diff(self, ctx2=None, match=None, **opts):
259 """Returns a diff generator for the given contexts and matcher"""
261 """Returns a diff generator for the given contexts and matcher"""
260 if ctx2 is None:
262 if ctx2 is None:
261 ctx2 = self.p1()
263 ctx2 = self.p1()
262 if ctx2 is not None:
264 if ctx2 is not None:
263 ctx2 = self._repo[ctx2]
265 ctx2 = self._repo[ctx2]
264 diffopts = patch.diffopts(self._repo.ui, opts)
266 diffopts = patch.diffopts(self._repo.ui, opts)
265 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
267 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
266
268
267 @propertycache
269 @propertycache
268 def _dirs(self):
270 def _dirs(self):
269 return scmutil.dirs(self._manifest)
271 return scmutil.dirs(self._manifest)
270
272
271 def dirs(self):
273 def dirs(self):
272 return self._dirs
274 return self._dirs
273
275
274 def dirty(self, missing=False, merge=True, branch=True):
276 def dirty(self, missing=False, merge=True, branch=True):
275 return False
277 return False
276
278
277 def status(self, other=None, match=None, listignored=False,
279 def status(self, other=None, match=None, listignored=False,
278 listclean=False, listunknown=False, listsubrepos=False):
280 listclean=False, listunknown=False, listsubrepos=False):
279 """return status of files between two nodes or node and working
281 """return status of files between two nodes or node and working
280 directory.
282 directory.
281
283
282 If other is None, compare this node with working directory.
284 If other is None, compare this node with working directory.
283
285
284 returns (modified, added, removed, deleted, unknown, ignored, clean)
286 returns (modified, added, removed, deleted, unknown, ignored, clean)
285 """
287 """
286
288
287 ctx1 = self
289 ctx1 = self
288 ctx2 = self._repo[other]
290 ctx2 = self._repo[other]
289
291
290 # This next code block is, admittedly, fragile logic that tests for
292 # This next code block is, admittedly, fragile logic that tests for
291 # reversing the contexts and wouldn't need to exist if it weren't for
293 # reversing the contexts and wouldn't need to exist if it weren't for
292 # the fast (and common) code path of comparing the working directory
294 # the fast (and common) code path of comparing the working directory
293 # with its first parent.
295 # with its first parent.
294 #
296 #
295 # What we're aiming for here is the ability to call:
297 # What we're aiming for here is the ability to call:
296 #
298 #
297 # workingctx.status(parentctx)
299 # workingctx.status(parentctx)
298 #
300 #
299 # If we always built the manifest for each context and compared those,
301 # If we always built the manifest for each context and compared those,
300 # then we'd be done. But the special case of the above call means we
302 # then we'd be done. But the special case of the above call means we
301 # just copy the manifest of the parent.
303 # just copy the manifest of the parent.
302 reversed = False
304 reversed = False
303 if (not isinstance(ctx1, changectx)
305 if (not isinstance(ctx1, changectx)
304 and isinstance(ctx2, changectx)):
306 and isinstance(ctx2, changectx)):
305 reversed = True
307 reversed = True
306 ctx1, ctx2 = ctx2, ctx1
308 ctx1, ctx2 = ctx2, ctx1
307
309
308 match = ctx2._matchstatus(ctx1, match)
310 match = ctx2._matchstatus(ctx1, match)
309 r = scmutil.status([], [], [], [], [], [], [])
311 r = scmutil.status([], [], [], [], [], [], [])
310 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
312 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
311 listunknown)
313 listunknown)
312
314
313 if reversed:
315 if reversed:
314 # Reverse added and removed. Clear deleted, unknown and ignored as
316 # Reverse added and removed. Clear deleted, unknown and ignored as
315 # these make no sense to reverse.
317 # these make no sense to reverse.
316 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
318 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
317 r.clean)
319 r.clean)
318
320
319 if listsubrepos:
321 if listsubrepos:
320 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
322 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
321 rev2 = ctx2.subrev(subpath)
323 rev2 = ctx2.subrev(subpath)
322 try:
324 try:
323 submatch = matchmod.narrowmatcher(subpath, match)
325 submatch = matchmod.narrowmatcher(subpath, match)
324 s = sub.status(rev2, match=submatch, ignored=listignored,
326 s = sub.status(rev2, match=submatch, ignored=listignored,
325 clean=listclean, unknown=listunknown,
327 clean=listclean, unknown=listunknown,
326 listsubrepos=True)
328 listsubrepos=True)
327 for rfiles, sfiles in zip(r, s):
329 for rfiles, sfiles in zip(r, s):
328 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
330 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
329 except error.LookupError:
331 except error.LookupError:
330 self._repo.ui.status(_("skipping missing "
332 self._repo.ui.status(_("skipping missing "
331 "subrepository: %s\n") % subpath)
333 "subrepository: %s\n") % subpath)
332
334
333 for l in r:
335 for l in r:
334 l.sort()
336 l.sort()
335
337
336 return r
338 return r
337
339
338
340
339 def makememctx(repo, parents, text, user, date, branch, files, store,
341 def makememctx(repo, parents, text, user, date, branch, files, store,
340 editor=None):
342 editor=None):
341 def getfilectx(repo, memctx, path):
343 def getfilectx(repo, memctx, path):
342 data, mode, copied = store.getfile(path)
344 data, mode, copied = store.getfile(path)
343 if data is None:
345 if data is None:
344 return None
346 return None
345 islink, isexec = mode
347 islink, isexec = mode
346 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
348 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
347 copied=copied, memctx=memctx)
349 copied=copied, memctx=memctx)
348 extra = {}
350 extra = {}
349 if branch:
351 if branch:
350 extra['branch'] = encoding.fromlocal(branch)
352 extra['branch'] = encoding.fromlocal(branch)
351 ctx = memctx(repo, parents, text, files, getfilectx, user,
353 ctx = memctx(repo, parents, text, files, getfilectx, user,
352 date, extra, editor)
354 date, extra, editor)
353 return ctx
355 return ctx
354
356
355 class changectx(basectx):
357 class changectx(basectx):
356 """A changecontext object makes access to data related to a particular
358 """A changecontext object makes access to data related to a particular
357 changeset convenient. It represents a read-only context already present in
359 changeset convenient. It represents a read-only context already present in
358 the repo."""
360 the repo."""
359 def __init__(self, repo, changeid=''):
361 def __init__(self, repo, changeid=''):
360 """changeid is a revision number, node, or tag"""
362 """changeid is a revision number, node, or tag"""
361
363
362 # since basectx.__new__ already took care of copying the object, we
364 # since basectx.__new__ already took care of copying the object, we
363 # don't need to do anything in __init__, so we just exit here
365 # don't need to do anything in __init__, so we just exit here
364 if isinstance(changeid, basectx):
366 if isinstance(changeid, basectx):
365 return
367 return
366
368
367 if changeid == '':
369 if changeid == '':
368 changeid = '.'
370 changeid = '.'
369 self._repo = repo
371 self._repo = repo
370
372
371 try:
373 try:
372 if isinstance(changeid, int):
374 if isinstance(changeid, int):
373 self._node = repo.changelog.node(changeid)
375 self._node = repo.changelog.node(changeid)
374 self._rev = changeid
376 self._rev = changeid
375 return
377 return
376 if isinstance(changeid, long):
378 if isinstance(changeid, long):
377 changeid = str(changeid)
379 changeid = str(changeid)
378 if changeid == 'null':
380 if changeid == 'null':
379 self._node = nullid
381 self._node = nullid
380 self._rev = nullrev
382 self._rev = nullrev
381 return
383 return
382 if changeid == 'tip':
384 if changeid == 'tip':
383 self._node = repo.changelog.tip()
385 self._node = repo.changelog.tip()
384 self._rev = repo.changelog.rev(self._node)
386 self._rev = repo.changelog.rev(self._node)
385 return
387 return
386 if changeid == '.' or changeid == repo.dirstate.p1():
388 if changeid == '.' or changeid == repo.dirstate.p1():
387 # this is a hack to delay/avoid loading obsmarkers
389 # this is a hack to delay/avoid loading obsmarkers
388 # when we know that '.' won't be hidden
390 # when we know that '.' won't be hidden
389 self._node = repo.dirstate.p1()
391 self._node = repo.dirstate.p1()
390 self._rev = repo.unfiltered().changelog.rev(self._node)
392 self._rev = repo.unfiltered().changelog.rev(self._node)
391 return
393 return
392 if len(changeid) == 20:
394 if len(changeid) == 20:
393 try:
395 try:
394 self._node = changeid
396 self._node = changeid
395 self._rev = repo.changelog.rev(changeid)
397 self._rev = repo.changelog.rev(changeid)
396 return
398 return
397 except error.FilteredRepoLookupError:
399 except error.FilteredRepoLookupError:
398 raise
400 raise
399 except LookupError:
401 except LookupError:
400 pass
402 pass
401
403
402 try:
404 try:
403 r = int(changeid)
405 r = int(changeid)
404 if str(r) != changeid:
406 if str(r) != changeid:
405 raise ValueError
407 raise ValueError
406 l = len(repo.changelog)
408 l = len(repo.changelog)
407 if r < 0:
409 if r < 0:
408 r += l
410 r += l
409 if r < 0 or r >= l:
411 if r < 0 or r >= l:
410 raise ValueError
412 raise ValueError
411 self._rev = r
413 self._rev = r
412 self._node = repo.changelog.node(r)
414 self._node = repo.changelog.node(r)
413 return
415 return
414 except error.FilteredIndexError:
416 except error.FilteredIndexError:
415 raise
417 raise
416 except (ValueError, OverflowError, IndexError):
418 except (ValueError, OverflowError, IndexError):
417 pass
419 pass
418
420
419 if len(changeid) == 40:
421 if len(changeid) == 40:
420 try:
422 try:
421 self._node = bin(changeid)
423 self._node = bin(changeid)
422 self._rev = repo.changelog.rev(self._node)
424 self._rev = repo.changelog.rev(self._node)
423 return
425 return
424 except error.FilteredLookupError:
426 except error.FilteredLookupError:
425 raise
427 raise
426 except (TypeError, LookupError):
428 except (TypeError, LookupError):
427 pass
429 pass
428
430
429 # lookup bookmarks through the name interface
431 # lookup bookmarks through the name interface
430 try:
432 try:
431 self._node = repo.names.singlenode(repo, changeid)
433 self._node = repo.names.singlenode(repo, changeid)
432 self._rev = repo.changelog.rev(self._node)
434 self._rev = repo.changelog.rev(self._node)
433 return
435 return
434 except KeyError:
436 except KeyError:
435 pass
437 pass
436 except error.FilteredRepoLookupError:
438 except error.FilteredRepoLookupError:
437 raise
439 raise
438 except error.RepoLookupError:
440 except error.RepoLookupError:
439 pass
441 pass
440
442
441 self._node = repo.unfiltered().changelog._partialmatch(changeid)
443 self._node = repo.unfiltered().changelog._partialmatch(changeid)
442 if self._node is not None:
444 if self._node is not None:
443 self._rev = repo.changelog.rev(self._node)
445 self._rev = repo.changelog.rev(self._node)
444 return
446 return
445
447
446 # lookup failed
448 # lookup failed
447 # check if it might have come from damaged dirstate
449 # check if it might have come from damaged dirstate
448 #
450 #
449 # XXX we could avoid the unfiltered if we had a recognizable
451 # XXX we could avoid the unfiltered if we had a recognizable
450 # exception for filtered changeset access
452 # exception for filtered changeset access
451 if changeid in repo.unfiltered().dirstate.parents():
453 if changeid in repo.unfiltered().dirstate.parents():
452 msg = _("working directory has unknown parent '%s'!")
454 msg = _("working directory has unknown parent '%s'!")
453 raise error.Abort(msg % short(changeid))
455 raise error.Abort(msg % short(changeid))
454 try:
456 try:
455 if len(changeid) == 20:
457 if len(changeid) == 20:
456 changeid = hex(changeid)
458 changeid = hex(changeid)
457 except TypeError:
459 except TypeError:
458 pass
460 pass
459 except (error.FilteredIndexError, error.FilteredLookupError,
461 except (error.FilteredIndexError, error.FilteredLookupError,
460 error.FilteredRepoLookupError):
462 error.FilteredRepoLookupError):
461 if repo.filtername == 'visible':
463 if repo.filtername == 'visible':
462 msg = _("hidden revision '%s'") % changeid
464 msg = _("hidden revision '%s'") % changeid
463 hint = _('use --hidden to access hidden revisions')
465 hint = _('use --hidden to access hidden revisions')
464 raise error.FilteredRepoLookupError(msg, hint=hint)
466 raise error.FilteredRepoLookupError(msg, hint=hint)
465 msg = _("filtered revision '%s' (not in '%s' subset)")
467 msg = _("filtered revision '%s' (not in '%s' subset)")
466 msg %= (changeid, repo.filtername)
468 msg %= (changeid, repo.filtername)
467 raise error.FilteredRepoLookupError(msg)
469 raise error.FilteredRepoLookupError(msg)
468 except IndexError:
470 except IndexError:
469 pass
471 pass
470 raise error.RepoLookupError(
472 raise error.RepoLookupError(
471 _("unknown revision '%s'") % changeid)
473 _("unknown revision '%s'") % changeid)
472
474
473 def __hash__(self):
475 def __hash__(self):
474 try:
476 try:
475 return hash(self._rev)
477 return hash(self._rev)
476 except AttributeError:
478 except AttributeError:
477 return id(self)
479 return id(self)
478
480
479 def __nonzero__(self):
481 def __nonzero__(self):
480 return self._rev != nullrev
482 return self._rev != nullrev
481
483
482 @propertycache
484 @propertycache
483 def _changeset(self):
485 def _changeset(self):
484 return self._repo.changelog.read(self.rev())
486 return self._repo.changelog.read(self.rev())
485
487
486 @propertycache
488 @propertycache
487 def _manifest(self):
489 def _manifest(self):
488 return self._repo.manifest.read(self._changeset[0])
490 return self._repo.manifest.read(self._changeset[0])
489
491
490 @propertycache
492 @propertycache
491 def _manifestdelta(self):
493 def _manifestdelta(self):
492 return self._repo.manifest.readdelta(self._changeset[0])
494 return self._repo.manifest.readdelta(self._changeset[0])
493
495
494 @propertycache
496 @propertycache
495 def _parents(self):
497 def _parents(self):
496 p = self._repo.changelog.parentrevs(self._rev)
498 p = self._repo.changelog.parentrevs(self._rev)
497 if p[1] == nullrev:
499 if p[1] == nullrev:
498 p = p[:-1]
500 p = p[:-1]
499 return [changectx(self._repo, x) for x in p]
501 return [changectx(self._repo, x) for x in p]
500
502
501 def changeset(self):
503 def changeset(self):
502 return self._changeset
504 return self._changeset
503 def manifestnode(self):
505 def manifestnode(self):
504 return self._changeset[0]
506 return self._changeset[0]
505
507
506 def user(self):
508 def user(self):
507 return self._changeset[1]
509 return self._changeset[1]
508 def date(self):
510 def date(self):
509 return self._changeset[2]
511 return self._changeset[2]
510 def files(self):
512 def files(self):
511 return self._changeset[3]
513 return self._changeset[3]
512 def description(self):
514 def description(self):
513 return self._changeset[4]
515 return self._changeset[4]
514 def branch(self):
516 def branch(self):
515 return encoding.tolocal(self._changeset[5].get("branch"))
517 return encoding.tolocal(self._changeset[5].get("branch"))
516 def closesbranch(self):
518 def closesbranch(self):
517 return 'close' in self._changeset[5]
519 return 'close' in self._changeset[5]
518 def extra(self):
520 def extra(self):
519 return self._changeset[5]
521 return self._changeset[5]
520 def tags(self):
522 def tags(self):
521 return self._repo.nodetags(self._node)
523 return self._repo.nodetags(self._node)
522 def bookmarks(self):
524 def bookmarks(self):
523 return self._repo.nodebookmarks(self._node)
525 return self._repo.nodebookmarks(self._node)
524 def phase(self):
526 def phase(self):
525 return self._repo._phasecache.phase(self._repo, self._rev)
527 return self._repo._phasecache.phase(self._repo, self._rev)
526 def hidden(self):
528 def hidden(self):
527 return self._rev in repoview.filterrevs(self._repo, 'visible')
529 return self._rev in repoview.filterrevs(self._repo, 'visible')
528
530
529 def children(self):
531 def children(self):
530 """return contexts for each child changeset"""
532 """return contexts for each child changeset"""
531 c = self._repo.changelog.children(self._node)
533 c = self._repo.changelog.children(self._node)
532 return [changectx(self._repo, x) for x in c]
534 return [changectx(self._repo, x) for x in c]
533
535
534 def ancestors(self):
536 def ancestors(self):
535 for a in self._repo.changelog.ancestors([self._rev]):
537 for a in self._repo.changelog.ancestors([self._rev]):
536 yield changectx(self._repo, a)
538 yield changectx(self._repo, a)
537
539
538 def descendants(self):
540 def descendants(self):
539 for d in self._repo.changelog.descendants([self._rev]):
541 for d in self._repo.changelog.descendants([self._rev]):
540 yield changectx(self._repo, d)
542 yield changectx(self._repo, d)
541
543
542 def filectx(self, path, fileid=None, filelog=None):
544 def filectx(self, path, fileid=None, filelog=None):
543 """get a file context from this changeset"""
545 """get a file context from this changeset"""
544 if fileid is None:
546 if fileid is None:
545 fileid = self.filenode(path)
547 fileid = self.filenode(path)
546 return filectx(self._repo, path, fileid=fileid,
548 return filectx(self._repo, path, fileid=fileid,
547 changectx=self, filelog=filelog)
549 changectx=self, filelog=filelog)
548
550
549 def ancestor(self, c2, warn=False):
551 def ancestor(self, c2, warn=False):
550 """return the "best" ancestor context of self and c2
552 """return the "best" ancestor context of self and c2
551
553
552 If there are multiple candidates, it will show a message and check
554 If there are multiple candidates, it will show a message and check
553 merge.preferancestor configuration before falling back to the
555 merge.preferancestor configuration before falling back to the
554 revlog ancestor."""
556 revlog ancestor."""
555 # deal with workingctxs
557 # deal with workingctxs
556 n2 = c2._node
558 n2 = c2._node
557 if n2 is None:
559 if n2 is None:
558 n2 = c2._parents[0]._node
560 n2 = c2._parents[0]._node
559 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
561 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
560 if not cahs:
562 if not cahs:
561 anc = nullid
563 anc = nullid
562 elif len(cahs) == 1:
564 elif len(cahs) == 1:
563 anc = cahs[0]
565 anc = cahs[0]
564 else:
566 else:
565 for r in self._repo.ui.configlist('merge', 'preferancestor'):
567 for r in self._repo.ui.configlist('merge', 'preferancestor'):
566 try:
568 try:
567 ctx = changectx(self._repo, r)
569 ctx = changectx(self._repo, r)
568 except error.RepoLookupError:
570 except error.RepoLookupError:
569 continue
571 continue
570 anc = ctx.node()
572 anc = ctx.node()
571 if anc in cahs:
573 if anc in cahs:
572 break
574 break
573 else:
575 else:
574 anc = self._repo.changelog.ancestor(self._node, n2)
576 anc = self._repo.changelog.ancestor(self._node, n2)
575 if warn:
577 if warn:
576 self._repo.ui.status(
578 self._repo.ui.status(
577 (_("note: using %s as ancestor of %s and %s\n") %
579 (_("note: using %s as ancestor of %s and %s\n") %
578 (short(anc), short(self._node), short(n2))) +
580 (short(anc), short(self._node), short(n2))) +
579 ''.join(_(" alternatively, use --config "
581 ''.join(_(" alternatively, use --config "
580 "merge.preferancestor=%s\n") %
582 "merge.preferancestor=%s\n") %
581 short(n) for n in sorted(cahs) if n != anc))
583 short(n) for n in sorted(cahs) if n != anc))
582 return changectx(self._repo, anc)
584 return changectx(self._repo, anc)
583
585
584 def descendant(self, other):
586 def descendant(self, other):
585 """True if other is descendant of this changeset"""
587 """True if other is descendant of this changeset"""
586 return self._repo.changelog.descendant(self._rev, other._rev)
588 return self._repo.changelog.descendant(self._rev, other._rev)
587
589
588 def walk(self, match):
590 def walk(self, match):
589 fset = set(match.files())
591 fset = set(match.files())
590 # for dirstate.walk, files=['.'] means "walk the whole tree".
592 # for dirstate.walk, files=['.'] means "walk the whole tree".
591 # follow that here, too
593 # follow that here, too
592 fset.discard('.')
594 fset.discard('.')
593
595
594 # avoid the entire walk if we're only looking for specific files
596 # avoid the entire walk if we're only looking for specific files
595 if fset and not match.anypats():
597 if fset and not match.anypats():
596 if util.all([fn in self for fn in fset]):
598 if util.all([fn in self for fn in fset]):
597 for fn in sorted(fset):
599 for fn in sorted(fset):
598 if match(fn):
600 if match(fn):
599 yield fn
601 yield fn
600 raise StopIteration
602 raise StopIteration
601
603
602 for fn in self:
604 for fn in self:
603 if fn in fset:
605 if fn in fset:
604 # specified pattern is the exact name
606 # specified pattern is the exact name
605 fset.remove(fn)
607 fset.remove(fn)
606 if match(fn):
608 if match(fn):
607 yield fn
609 yield fn
608 for fn in sorted(fset):
610 for fn in sorted(fset):
609 if fn in self._dirs:
611 if fn in self._dirs:
610 # specified pattern is a directory
612 # specified pattern is a directory
611 continue
613 continue
612 match.bad(fn, _('no such file in rev %s') % self)
614 match.bad(fn, _('no such file in rev %s') % self)
613
615
614 def matches(self, match):
616 def matches(self, match):
615 return self.walk(match)
617 return self.walk(match)
616
618
617 class basefilectx(object):
619 class basefilectx(object):
618 """A filecontext object represents the common logic for its children:
620 """A filecontext object represents the common logic for its children:
619 filectx: read-only access to a filerevision that is already present
621 filectx: read-only access to a filerevision that is already present
620 in the repo,
622 in the repo,
621 workingfilectx: a filecontext that represents files from the working
623 workingfilectx: a filecontext that represents files from the working
622 directory,
624 directory,
623 memfilectx: a filecontext that represents files in-memory."""
625 memfilectx: a filecontext that represents files in-memory."""
624 def __new__(cls, repo, path, *args, **kwargs):
626 def __new__(cls, repo, path, *args, **kwargs):
625 return super(basefilectx, cls).__new__(cls)
627 return super(basefilectx, cls).__new__(cls)
626
628
627 @propertycache
629 @propertycache
628 def _filelog(self):
630 def _filelog(self):
629 return self._repo.file(self._path)
631 return self._repo.file(self._path)
630
632
631 @propertycache
633 @propertycache
632 def _changeid(self):
634 def _changeid(self):
633 if '_changeid' in self.__dict__:
635 if '_changeid' in self.__dict__:
634 return self._changeid
636 return self._changeid
635 elif '_changectx' in self.__dict__:
637 elif '_changectx' in self.__dict__:
636 return self._changectx.rev()
638 return self._changectx.rev()
637 elif '_descendantrev' in self.__dict__:
639 elif '_descendantrev' in self.__dict__:
638 # this file context was created from a revision with a known
640 # this file context was created from a revision with a known
639 # descendant, we can (lazily) correct for linkrev aliases
641 # descendant, we can (lazily) correct for linkrev aliases
640 return self._adjustlinkrev(self._path, self._filelog,
642 return self._adjustlinkrev(self._path, self._filelog,
641 self._filenode, self._descendantrev)
643 self._filenode, self._descendantrev)
642 else:
644 else:
643 return self._filelog.linkrev(self._filerev)
645 return self._filelog.linkrev(self._filerev)
644
646
645 @propertycache
647 @propertycache
646 def _filenode(self):
648 def _filenode(self):
647 if '_fileid' in self.__dict__:
649 if '_fileid' in self.__dict__:
648 return self._filelog.lookup(self._fileid)
650 return self._filelog.lookup(self._fileid)
649 else:
651 else:
650 return self._changectx.filenode(self._path)
652 return self._changectx.filenode(self._path)
651
653
652 @propertycache
654 @propertycache
653 def _filerev(self):
655 def _filerev(self):
654 return self._filelog.rev(self._filenode)
656 return self._filelog.rev(self._filenode)
655
657
656 @propertycache
658 @propertycache
657 def _repopath(self):
659 def _repopath(self):
658 return self._path
660 return self._path
659
661
660 def __nonzero__(self):
662 def __nonzero__(self):
661 try:
663 try:
662 self._filenode
664 self._filenode
663 return True
665 return True
664 except error.LookupError:
666 except error.LookupError:
665 # file is missing
667 # file is missing
666 return False
668 return False
667
669
668 def __str__(self):
670 def __str__(self):
669 return "%s@%s" % (self.path(), self._changectx)
671 return "%s@%s" % (self.path(), self._changectx)
670
672
671 def __repr__(self):
673 def __repr__(self):
672 return "<%s %s>" % (type(self).__name__, str(self))
674 return "<%s %s>" % (type(self).__name__, str(self))
673
675
674 def __hash__(self):
676 def __hash__(self):
675 try:
677 try:
676 return hash((self._path, self._filenode))
678 return hash((self._path, self._filenode))
677 except AttributeError:
679 except AttributeError:
678 return id(self)
680 return id(self)
679
681
680 def __eq__(self, other):
682 def __eq__(self, other):
681 try:
683 try:
682 return (type(self) == type(other) and self._path == other._path
684 return (type(self) == type(other) and self._path == other._path
683 and self._filenode == other._filenode)
685 and self._filenode == other._filenode)
684 except AttributeError:
686 except AttributeError:
685 return False
687 return False
686
688
687 def __ne__(self, other):
689 def __ne__(self, other):
688 return not (self == other)
690 return not (self == other)
689
691
690 def filerev(self):
692 def filerev(self):
691 return self._filerev
693 return self._filerev
692 def filenode(self):
694 def filenode(self):
693 return self._filenode
695 return self._filenode
694 def flags(self):
696 def flags(self):
695 return self._changectx.flags(self._path)
697 return self._changectx.flags(self._path)
696 def filelog(self):
698 def filelog(self):
697 return self._filelog
699 return self._filelog
698 def rev(self):
700 def rev(self):
699 return self._changeid
701 return self._changeid
700 def linkrev(self):
702 def linkrev(self):
701 return self._filelog.linkrev(self._filerev)
703 return self._filelog.linkrev(self._filerev)
702 def node(self):
704 def node(self):
703 return self._changectx.node()
705 return self._changectx.node()
704 def hex(self):
706 def hex(self):
705 return self._changectx.hex()
707 return self._changectx.hex()
706 def user(self):
708 def user(self):
707 return self._changectx.user()
709 return self._changectx.user()
708 def date(self):
710 def date(self):
709 return self._changectx.date()
711 return self._changectx.date()
710 def files(self):
712 def files(self):
711 return self._changectx.files()
713 return self._changectx.files()
712 def description(self):
714 def description(self):
713 return self._changectx.description()
715 return self._changectx.description()
714 def branch(self):
716 def branch(self):
715 return self._changectx.branch()
717 return self._changectx.branch()
716 def extra(self):
718 def extra(self):
717 return self._changectx.extra()
719 return self._changectx.extra()
718 def phase(self):
720 def phase(self):
719 return self._changectx.phase()
721 return self._changectx.phase()
720 def phasestr(self):
722 def phasestr(self):
721 return self._changectx.phasestr()
723 return self._changectx.phasestr()
722 def manifest(self):
724 def manifest(self):
723 return self._changectx.manifest()
725 return self._changectx.manifest()
724 def changectx(self):
726 def changectx(self):
725 return self._changectx
727 return self._changectx
726
728
727 def path(self):
729 def path(self):
728 return self._path
730 return self._path
729
731
730 def isbinary(self):
732 def isbinary(self):
731 try:
733 try:
732 return util.binary(self.data())
734 return util.binary(self.data())
733 except IOError:
735 except IOError:
734 return False
736 return False
735 def isexec(self):
737 def isexec(self):
736 return 'x' in self.flags()
738 return 'x' in self.flags()
737 def islink(self):
739 def islink(self):
738 return 'l' in self.flags()
740 return 'l' in self.flags()
739
741
740 def cmp(self, fctx):
742 def cmp(self, fctx):
741 """compare with other file context
743 """compare with other file context
742
744
743 returns True if different than fctx.
745 returns True if different than fctx.
744 """
746 """
745 if (fctx._filerev is None
747 if (fctx._filerev is None
746 and (self._repo._encodefilterpats
748 and (self._repo._encodefilterpats
747 # if file data starts with '\1\n', empty metadata block is
749 # if file data starts with '\1\n', empty metadata block is
748 # prepended, which adds 4 bytes to filelog.size().
750 # prepended, which adds 4 bytes to filelog.size().
749 or self.size() - 4 == fctx.size())
751 or self.size() - 4 == fctx.size())
750 or self.size() == fctx.size()):
752 or self.size() == fctx.size()):
751 return self._filelog.cmp(self._filenode, fctx.data())
753 return self._filelog.cmp(self._filenode, fctx.data())
752
754
753 return True
755 return True
754
756
755 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
757 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
756 """return the first ancestor of <srcrev> introducing <fnode>
758 """return the first ancestor of <srcrev> introducing <fnode>
757
759
758 If the linkrev of the file revision does not point to an ancestor of
760 If the linkrev of the file revision does not point to an ancestor of
759 srcrev, we'll walk down the ancestors until we find one introducing
761 srcrev, we'll walk down the ancestors until we find one introducing
760 this file revision.
762 this file revision.
761
763
762 :repo: a localrepository object (used to access changelog and manifest)
764 :repo: a localrepository object (used to access changelog and manifest)
763 :path: the file path
765 :path: the file path
764 :fnode: the nodeid of the file revision
766 :fnode: the nodeid of the file revision
765 :filelog: the filelog of this path
767 :filelog: the filelog of this path
766 :srcrev: the changeset revision we search ancestors from
768 :srcrev: the changeset revision we search ancestors from
767 :inclusive: if true, the src revision will also be checked
769 :inclusive: if true, the src revision will also be checked
768 """
770 """
769 repo = self._repo
771 repo = self._repo
770 cl = repo.unfiltered().changelog
772 cl = repo.unfiltered().changelog
771 ma = repo.manifest
773 ma = repo.manifest
772 # fetch the linkrev
774 # fetch the linkrev
773 fr = filelog.rev(fnode)
775 fr = filelog.rev(fnode)
774 lkr = filelog.linkrev(fr)
776 lkr = filelog.linkrev(fr)
775 # hack to reuse ancestor computation when searching for renames
777 # hack to reuse ancestor computation when searching for renames
776 memberanc = getattr(self, '_ancestrycontext', None)
778 memberanc = getattr(self, '_ancestrycontext', None)
777 iteranc = None
779 iteranc = None
778 if memberanc is None:
780 if memberanc is None:
779 memberanc = iteranc = cl.ancestors([srcrev], lkr,
781 memberanc = iteranc = cl.ancestors([srcrev], lkr,
780 inclusive=inclusive)
782 inclusive=inclusive)
781 # check if this linkrev is an ancestor of srcrev
783 # check if this linkrev is an ancestor of srcrev
782 if lkr not in memberanc:
784 if lkr not in memberanc:
783 if iteranc is None:
785 if iteranc is None:
784 iteranc = cl.ancestors([srcrev], lkr, inclusive=inclusive)
786 iteranc = cl.ancestors([srcrev], lkr, inclusive=inclusive)
785 for a in iteranc:
787 for a in iteranc:
786 ac = cl.read(a) # get changeset data (we avoid object creation)
788 ac = cl.read(a) # get changeset data (we avoid object creation)
787 if path in ac[3]: # checking the 'files' field.
789 if path in ac[3]: # checking the 'files' field.
788 # The file has been touched, check if the content is
790 # The file has been touched, check if the content is
789 # similar to the one we search for.
791 # similar to the one we search for.
790 if fnode == ma.readfast(ac[0]).get(path):
792 if fnode == ma.readfast(ac[0]).get(path):
791 return a
793 return a
792 # In theory, we should never get out of that loop without a result.
794 # In theory, we should never get out of that loop without a result.
793 # But if manifest uses a buggy file revision (not children of the
795 # But if manifest uses a buggy file revision (not children of the
794 # one it replaces) we could. Such a buggy situation will likely
796 # one it replaces) we could. Such a buggy situation will likely
795 # result is crash somewhere else at to some point.
797 # result is crash somewhere else at to some point.
796 return lkr
798 return lkr
797
799
798 def introrev(self):
800 def introrev(self):
799 """return the rev of the changeset which introduced this file revision
801 """return the rev of the changeset which introduced this file revision
800
802
801 This method is different from linkrev because it take into account the
803 This method is different from linkrev because it take into account the
802 changeset the filectx was created from. It ensures the returned
804 changeset the filectx was created from. It ensures the returned
803 revision is one of its ancestors. This prevents bugs from
805 revision is one of its ancestors. This prevents bugs from
804 'linkrev-shadowing' when a file revision is used by multiple
806 'linkrev-shadowing' when a file revision is used by multiple
805 changesets.
807 changesets.
806 """
808 """
807 lkr = self.linkrev()
809 lkr = self.linkrev()
808 attrs = vars(self)
810 attrs = vars(self)
809 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
811 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
810 if noctx or self.rev() == lkr:
812 if noctx or self.rev() == lkr:
811 return self.linkrev()
813 return self.linkrev()
812 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
814 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
813 self.rev(), inclusive=True)
815 self.rev(), inclusive=True)
814
816
815 def parents(self):
817 def parents(self):
816 _path = self._path
818 _path = self._path
817 fl = self._filelog
819 fl = self._filelog
818 parents = self._filelog.parents(self._filenode)
820 parents = self._filelog.parents(self._filenode)
819 pl = [(_path, node, fl) for node in parents if node != nullid]
821 pl = [(_path, node, fl) for node in parents if node != nullid]
820
822
821 r = fl.renamed(self._filenode)
823 r = fl.renamed(self._filenode)
822 if r:
824 if r:
823 # - In the simple rename case, both parent are nullid, pl is empty.
825 # - In the simple rename case, both parent are nullid, pl is empty.
824 # - In case of merge, only one of the parent is null id and should
826 # - In case of merge, only one of the parent is null id and should
825 # be replaced with the rename information. This parent is -always-
827 # be replaced with the rename information. This parent is -always-
826 # the first one.
828 # the first one.
827 #
829 #
828 # As null id have always been filtered out in the previous list
830 # As null id have always been filtered out in the previous list
829 # comprehension, inserting to 0 will always result in "replacing
831 # comprehension, inserting to 0 will always result in "replacing
830 # first nullid parent with rename information.
832 # first nullid parent with rename information.
831 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
833 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
832
834
833 ret = []
835 ret = []
834 for path, fnode, l in pl:
836 for path, fnode, l in pl:
835 if '_changeid' in vars(self) or '_changectx' in vars(self):
837 if '_changeid' in vars(self) or '_changectx' in vars(self):
836 # If self is associated with a changeset (probably explicitly
838 # If self is associated with a changeset (probably explicitly
837 # fed), ensure the created filectx is associated with a
839 # fed), ensure the created filectx is associated with a
838 # changeset that is an ancestor of self.changectx.
840 # changeset that is an ancestor of self.changectx.
839 # This lets us later use _adjustlinkrev to get a correct link.
841 # This lets us later use _adjustlinkrev to get a correct link.
840 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
842 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
841 fctx._descendantrev = self.rev()
843 fctx._descendantrev = self.rev()
842 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
844 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
843 elif '_descendantrev' in vars(self):
845 elif '_descendantrev' in vars(self):
844 # Otherwise propagate _descendantrev if we have one associated.
846 # Otherwise propagate _descendantrev if we have one associated.
845 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
847 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
846 fctx._descendantrev = self._descendantrev
848 fctx._descendantrev = self._descendantrev
847 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
849 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
848 else:
850 else:
849 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
851 fctx = filectx(self._repo, path, fileid=fnode, filelog=l)
            ret.append(fctx)
        return ret

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, linenumber=None, diffopts=None):
        '''returns a list of tuples of (ctx, line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed.
        If the "linenumber" parameter is not None, this instead returns
        tuples of ((ctx, linenumber), line) for each line, where
        linenumber is the line's number at its first appearance in the
        managed file.
        To reduce the annotation cost, a fixed value (False) is used as
        the linenumber if the "linenumber" parameter is False.'''

        if linenumber is None:
            def decorate(text, rev):
                return ([rev] * len(text.splitlines()), text)
        elif linenumber:
            def decorate(text, rev):
                size = len(text.splitlines())
                return ([(rev, i) for i in xrange(1, size + 1)], text)
        else:
            def decorate(text, rev):
                return ([(rev, False)] * len(text.splitlines()), text)

        def pair(parent, child):
            blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
                                     refine=True)
            for (a1, a2, b1, b2), t in blocks:
                # Changed blocks ('!') or blocks made only of blank lines ('~')
                # belong to the child.
                if t == '=':
                    child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if '_filelog' not in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = self.filectx(self.filenode(), changeid=introrev)

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        visit = [base]
        hist = {}
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit[-1]
            pcached = f in pcache
            if not pcached:
                pcache[f] = parents(f)

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
                if not pcached:
                    needed[p] = needed.get(p, 0) + 1
            if ready:
                visit.pop()
                reusable = f in hist
                if reusable:
                    curr = hist[f]
                else:
                    curr = decorate(f.data(), f)
                for p in pl:
                    if not reusable:
                        curr = pair(hist[p], curr)
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                pcache[f] = []

        return zip(hist[base][0], hist[base][1].splitlines(True))

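    # A minimal usage sketch for annotate(), kept as comments so it does not
    # affect the module itself; the repository path and file name below are
    # hypothetical examples, not something this module defines:
    #
    #   from mercurial import hg, ui as uimod
    #   repo = hg.repository(uimod.ui(), '/path/to/repo')
    #   fctx = repo['tip']['README']
    #   for actx, line in fctx.annotate(follow=True):
    #       # actx is the filectx of the revision that last changed the line
    #       print actx.rev(), line,
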
    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        cut = followfirst and 1 or None
        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build `changectx` for a filtered revision. In such a case we
            # fall back to creating `changectx` on the unfiltered version of
            # the repository. This fallback should not be an issue because
            # `changectx` from `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However
            # the behavior was not correct before filtering either, and an
            # "incorrect behavior" is seen as better than a "crash".
            #
            # Linkrevs have several serious problems with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when solving the linkrev issues is on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy", "abort") == "ignore":
                return ""
            raise util.Abort(_("censored node: %s") % short(self._filenode),
                             hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    def renamed(self):
        """check if file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report the copy for
        the changeset only if the file revision's linkrev points back to
        the changeset in question or both changeset parents contain
        different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

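# A minimal sketch of how a filectx is typically obtained and used; the
# revision name and file path are hypothetical examples:
#
#   fctx = repo['default']['mercurial/context.py']   # changectx[path]
#   fctx.data()       # file contents at that revision
#   fctx.size()       # length reported by the filelog
#   fctx.renamed()    # (source path, filenode) if this revision is a copy
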
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + "+"

    def __nonzero__(self):
        return True

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get

        if len(self._parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = self._parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = self._parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeid from the parent, but we append an extra
        letter when modified. Modified files get an extra 'm' while added
        files get an extra 'a'. This is used by manifest merging to see that
        files are different and by update logic to avoid deleting newly
        added files.
        """

        man1 = self._parents[0].manifest()
        man = man1.copy()
        if len(self._parents) > 1:
            man2 = self.p2().manifest()
            def getman(f):
                if f in man1:
                    return man1
                return man2
        else:
            getman = lambda f: man1

        copied = self._repo.dirstate.copies()
        ff = self._flagfunc
        for i, l in (("a", self._status.added), ("m", self._status.modified)):
            for f in l:
                orig = copied.get(f, f)
                man[f] = getman(orig).get(orig, nullid) + i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in self._status.deleted + self._status.removed:
            if f in man:
                del man[f]

        return man

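    # An illustrative example (hypothetical file names) of the node-suffix
    # scheme described in the _manifest docstring above:
    #
    #   parent manifest:  {'a.txt': <20-byte node>}
    #   after modifying a.txt and adding b.txt, this property yields
    #   {'a.txt': <node> + 'm', 'b.txt': <parent node or nullid> + 'a'}
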
    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        return util.makedate()

    def subrev(self, subpath):
        return None

    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        t = []
        for p in self.parents():
            t.extend(p.tags())
        return t

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
                                               True, False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        self._repo.dirstate.beginparentchange()
        for f in self.modified() + self.added():
            self._repo.dirstate.normal(f)
        for f in self.removed():
            self._repo.dirstate.drop(f)
        self._repo.dirstate.setparents(node)
        self._repo.dirstate.endparentchange()

class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
               or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty():
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

    def add(self, list, prefix=""):
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        ui, ds = self._repo.ui, self._repo.dirstate
        try:
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                scmutil.checkportable(ui, join(f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % join(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, join(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % join(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % join(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, files, prefix=""):
        join = lambda f: os.path.join(prefix, f)
        wlock = self._repo.wlock()
        try:
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % join(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected
        finally:
            wlock.release()

    def undelete(self, list):
        pctxs = self.parents()
        wlock = self._repo.wlock()
        try:
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % f)
                else:
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError, err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n") % dest)
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n") % dest)
        else:
            wlock = self._repo.wlock()
            try:
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], []

        modified = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)

        # update dirstate for files that are actually clean
        if fixup:
            try:
                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                wlock = self._repo.wlock(False)
                normal = self._repo.dirstate.normal
                try:
                    for f in fixup:
                        normal(f)
                finally:
                    wlock.release()
            except error.LockError:
                pass
        return modified, fixup

    def _manifestmatches(self, match, s):
        """Slow path for workingctx

        The fast path is when we compare the working directory to its parent
        which means this function is comparing with a non-parent; therefore we
        need to build a manifest and return what matches.
        """
        mf = self._repo['.']._manifestmatches(match, s)
        for f in s.modified + s.added:
            mf[f] = _newnode
            mf.setflag(f, self.flags(f))
        for f in s.removed:
            if f in mf:
                del mf[f]
        return mf

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        if cmp:
            modified2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)

            # update dirstate for files that are actually clean
            if fixup and listclean:
                s.clean.extend(fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, match)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and f not in other.dirs():
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

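# A minimal sketch of typical workingctx use; 'repo' is assumed to be an
# existing localrepo object and the file name is a hypothetical example:
#
#   wctx = repo[None]          # the working directory context
#   wctx.dirty()               # True if there is anything to commit
#   wctx.add(['newfile.txt'])  # schedule a file for addition
#   wctx.files()               # modified + added + removed, sorted
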
1568 class committablefilectx(basefilectx):
1570 class committablefilectx(basefilectx):
1569 """A committablefilectx provides common functionality for a file context
1571 """A committablefilectx provides common functionality for a file context
1570 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1572 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1571 def __init__(self, repo, path, filelog=None, ctx=None):
1573 def __init__(self, repo, path, filelog=None, ctx=None):
1572 self._repo = repo
1574 self._repo = repo
1573 self._path = path
1575 self._path = path
1574 self._changeid = None
1576 self._changeid = None
1575 self._filerev = self._filenode = None
1577 self._filerev = self._filenode = None
1576
1578
1577 if filelog is not None:
1579 if filelog is not None:
1578 self._filelog = filelog
1580 self._filelog = filelog
1579 if ctx:
1581 if ctx:
1580 self._changectx = ctx
1582 self._changectx = ctx
1581
1583
1582 def __nonzero__(self):
1584 def __nonzero__(self):
1583 return True
1585 return True
1584
1586
1585 def parents(self):
1587 def parents(self):
1586 '''return parent filectxs, following copies if necessary'''
1588 '''return parent filectxs, following copies if necessary'''
1587 def filenode(ctx, path):
1589 def filenode(ctx, path):
1588 return ctx._manifest.get(path, nullid)
1590 return ctx._manifest.get(path, nullid)
1589
1591
1590 path = self._path
1592 path = self._path
1591 fl = self._filelog
1593 fl = self._filelog
1592 pcl = self._changectx._parents
1594 pcl = self._changectx._parents
1593 renamed = self.renamed()
1595 renamed = self.renamed()
1594
1596
1595 if renamed:
1597 if renamed:
1596 pl = [renamed + (None,)]
1598 pl = [renamed + (None,)]
1597 else:
1599 else:
1598 pl = [(path, filenode(pcl[0], path), fl)]
1600 pl = [(path, filenode(pcl[0], path), fl)]
1599
1601
1600 for pc in pcl[1:]:
1602 for pc in pcl[1:]:
1601 pl.append((path, filenode(pc, path), fl))
1603 pl.append((path, filenode(pc, path), fl))
1602
1604
1603 return [filectx(self._repo, p, fileid=n, filelog=l)
1605 return [filectx(self._repo, p, fileid=n, filelog=l)
1604 for p, n, l in pl if n != nullid]
1606 for p, n, l in pl if n != nullid]
1605
1607
1606 def children(self):
1608 def children(self):
1607 return []
1609 return []
1608
1610
1609 class workingfilectx(committablefilectx):
1611 class workingfilectx(committablefilectx):
1610 """A workingfilectx object makes access to data related to a particular
1612 """A workingfilectx object makes access to data related to a particular
1611 file in the working directory convenient."""
1613 file in the working directory convenient."""
1612 def __init__(self, repo, path, filelog=None, workingctx=None):
1614 def __init__(self, repo, path, filelog=None, workingctx=None):
1613 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1615 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1614
1616
1615 @propertycache
1617 @propertycache
1616 def _changectx(self):
1618 def _changectx(self):
1617 return workingctx(self._repo)
1619 return workingctx(self._repo)
1618
1620
1619 def data(self):
1621 def data(self):
1620 return self._repo.wread(self._path)
1622 return self._repo.wread(self._path)
1621 def renamed(self):
1623 def renamed(self):
1622 rp = self._repo.dirstate.copied(self._path)
1624 rp = self._repo.dirstate.copied(self._path)
1623 if not rp:
1625 if not rp:
1624 return None
1626 return None
1625 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1627 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1626
1628
1627 def size(self):
1629 def size(self):
1628 return self._repo.wvfs.lstat(self._path).st_size
1630 return self._repo.wvfs.lstat(self._path).st_size
1629 def date(self):
1631 def date(self):
1630 t, tz = self._changectx.date()
1632 t, tz = self._changectx.date()
1631 try:
1633 try:
1632 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1634 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1633 except OSError, err:
1635 except OSError, err:
1634 if err.errno != errno.ENOENT:
1636 if err.errno != errno.ENOENT:
1635 raise
1637 raise
1636 return (t, tz)
1638 return (t, tz)
1637
1639
1638 def cmp(self, fctx):
1640 def cmp(self, fctx):
1639 """compare with other file context
1641 """compare with other file context
1640
1642
1641 returns True if different than fctx.
1643 returns True if different than fctx.
1642 """
1644 """
1643 # fctx should be a filectx (not a workingfilectx)
1645 # fctx should be a filectx (not a workingfilectx)
1644 # invert comparison to reuse the same code path
1646 # invert comparison to reuse the same code path
1645 return fctx.cmp(self)
1647 return fctx.cmp(self)
1646
1648
1647 def remove(self, ignoremissing=False):
1649 def remove(self, ignoremissing=False):
1648 """wraps unlink for a repo's working directory"""
1650 """wraps unlink for a repo's working directory"""
1649 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1651 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1650
1652
1651 def write(self, data, flags):
1653 def write(self, data, flags):
1652 """wraps repo.wwrite"""
1654 """wraps repo.wwrite"""
1653 self._repo.wwrite(self._path, data, flags)
1655 self._repo.wwrite(self._path, data, flags)
1654
1656
1655 class workingcommitctx(workingctx):
1657 class workingcommitctx(workingctx):
1656 """A workingcommitctx object makes access to data related to
1658 """A workingcommitctx object makes access to data related to
1657 the revision being committed convenient.
1659 the revision being committed convenient.
1658
1660
1659 This hides changes in the working directory, if they aren't
1661 This hides changes in the working directory, if they aren't
1660 committed in this context.
1662 committed in this context.
1661 """
1663 """
1662 def __init__(self, repo, changes,
1664 def __init__(self, repo, changes,
1663 text="", user=None, date=None, extra=None):
1665 text="", user=None, date=None, extra=None):
1664 super(workingctx, self).__init__(repo, text, user, date, extra,
1666 super(workingctx, self).__init__(repo, text, user, date, extra,
1665 changes)
1667 changes)
1666
1668
1667 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1669 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1668 unknown=False):
1670 unknown=False):
1669 """Return matched files only in ``self._status``
1671 """Return matched files only in ``self._status``
1670
1672
1671 Uncommitted files appear "clean" via this context, even if
1673 Uncommitted files appear "clean" via this context, even if
1672 they aren't actually so in the working directory.
1674 they aren't actually so in the working directory.
1673 """
1675 """
1674 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1676 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1675 if clean:
1677 if clean:
1676 clean = [f for f in self._manifest if f not in self._changedset]
1678 clean = [f for f in self._manifest if f not in self._changedset]
1677 else:
1679 else:
1678 clean = []
1680 clean = []
1679 return scmutil.status([f for f in self._status.modified if match(f)],
1681 return scmutil.status([f for f in self._status.modified if match(f)],
1680 [f for f in self._status.added if match(f)],
1682 [f for f in self._status.added if match(f)],
1681 [f for f in self._status.removed if match(f)],
1683 [f for f in self._status.removed if match(f)],
1682 [], [], [], clean)
1684 [], [], [], clean)
1683
1685
1684 @propertycache
1686 @propertycache
1685 def _changedset(self):
1687 def _changedset(self):
1686 """Return the set of files changed in this context
1688 """Return the set of files changed in this context
1687 """
1689 """
1688 changed = set(self._status.modified)
1690 changed = set(self._status.modified)
1689 changed.update(self._status.added)
1691 changed.update(self._status.added)
1690 changed.update(self._status.removed)
1692 changed.update(self._status.removed)
1691 return changed
1693 return changed
1692
1694
1693 class memctx(committablectx):
1695 class memctx(committablectx):
1694 """Use memctx to perform in-memory commits via localrepo.commitctx().
1696 """Use memctx to perform in-memory commits via localrepo.commitctx().
1695
1697
1696 Revision information is supplied at initialization time while
1698 Revision information is supplied at initialization time while
1697 related files data and is made available through a callback
1699 related files data and is made available through a callback
1698 mechanism. 'repo' is the current localrepo, 'parents' is a
1700 mechanism. 'repo' is the current localrepo, 'parents' is a
1699 sequence of two parent revisions identifiers (pass None for every
1701 sequence of two parent revisions identifiers (pass None for every
1700 missing parent), 'text' is the commit message and 'files' lists
1702 missing parent), 'text' is the commit message and 'files' lists
1701 names of files touched by the revision (normalized and relative to
1703 names of files touched by the revision (normalized and relative to
1702 repository root).
1704 repository root).
1703
1705
1704 filectxfn(repo, memctx, path) is a callable receiving the
1706 filectxfn(repo, memctx, path) is a callable receiving the
1705 repository, the current memctx object and the normalized path of
1707 repository, the current memctx object and the normalized path of
1706 requested file, relative to repository root. It is fired by the
1708 requested file, relative to repository root. It is fired by the
1707 commit function for every file in 'files', but calls order is
1709 commit function for every file in 'files', but calls order is
1708 undefined. If the file is available in the revision being
1710 undefined. If the file is available in the revision being
1709 committed (updated or added), filectxfn returns a memfilectx
1711 committed (updated or added), filectxfn returns a memfilectx
1710 object. If the file was removed, filectxfn raises an
1712 object. If the file was removed, filectxfn raises an
1711 IOError. Moved files are represented by marking the source file
1713 IOError. Moved files are represented by marking the source file
1712 removed and the new file added with copy information (see
1714 removed and the new file added with copy information (see
1713 memfilectx).
1715 memfilectx).
1714
1716
1715 user receives the committer name and defaults to current
1717 user receives the committer name and defaults to current
1716 repository username, date is the commit date in any format
1718 repository username, date is the commit date in any format
1717 supported by util.parsedate() and defaults to current date, extra
1719 supported by util.parsedate() and defaults to current date, extra
1718 is a dictionary of metadata or is left empty.
1720 is a dictionary of metadata or is left empty.
1719 """
1721 """
1720
1722
1721 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1723 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1722 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1724 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1723 # this field to determine what to do in filectxfn.
1725 # this field to determine what to do in filectxfn.
1724 _returnnoneformissingfiles = True
1726 _returnnoneformissingfiles = True
1725
1727
1726 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1728 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1727 date=None, extra=None, editor=False):
1729 date=None, extra=None, editor=False):
1728 super(memctx, self).__init__(repo, text, user, date, extra)
1730 super(memctx, self).__init__(repo, text, user, date, extra)
1729 self._rev = None
1731 self._rev = None
1730 self._node = None
1732 self._node = None
1731 parents = [(p or nullid) for p in parents]
1733 parents = [(p or nullid) for p in parents]
1732 p1, p2 = parents
1734 p1, p2 = parents
1733 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1735 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1734 files = sorted(set(files))
1736 files = sorted(set(files))
1735 self._files = files
1737 self._files = files
1736 self.substate = {}
1738 self.substate = {}
1737
1739
1738 # if store is not callable, wrap it in a function
1740 # if store is not callable, wrap it in a function
1739 if not callable(filectxfn):
1741 if not callable(filectxfn):
1740 def getfilectx(repo, memctx, path):
1742 def getfilectx(repo, memctx, path):
1741 fctx = filectxfn[path]
1743 fctx = filectxfn[path]
1742 # this is weird but apparently we only keep track of one parent
1744 # this is weird but apparently we only keep track of one parent
1743 # (why not only store that instead of a tuple?)
1745 # (why not only store that instead of a tuple?)
1744 copied = fctx.renamed()
1746 copied = fctx.renamed()
1745 if copied:
1747 if copied:
1746 copied = copied[0]
1748 copied = copied[0]
1747 return memfilectx(repo, path, fctx.data(),
1749 return memfilectx(repo, path, fctx.data(),
1748 islink=fctx.islink(), isexec=fctx.isexec(),
1750 islink=fctx.islink(), isexec=fctx.isexec(),
1749 copied=copied, memctx=memctx)
1751 copied=copied, memctx=memctx)
1750 self._filectxfn = getfilectx
1752 self._filectxfn = getfilectx
1751 else:
1753 else:
1752 # "util.cachefunc" reduces invocation of possibly expensive
1754 # "util.cachefunc" reduces invocation of possibly expensive
1753 # "filectxfn" for performance (e.g. converting from another VCS)
1755 # "filectxfn" for performance (e.g. converting from another VCS)
1754 self._filectxfn = util.cachefunc(filectxfn)
1756 self._filectxfn = util.cachefunc(filectxfn)
1755
1757
1756 self._extra = extra and extra.copy() or {}
1758 self._extra = extra and extra.copy() or {}
1757 if self._extra.get('branch', '') == '':
1759 if self._extra.get('branch', '') == '':
1758 self._extra['branch'] = 'default'
1760 self._extra['branch'] = 'default'
1759
1761
1760 if editor:
1762 if editor:
1761 self._text = editor(self._repo, self, [])
1763 self._text = editor(self._repo, self, [])
1762 self._repo.savecommitmessage(self._text)
1764 self._repo.savecommitmessage(self._text)
1763
1765
1764 def filectx(self, path, filelog=None):
1766 def filectx(self, path, filelog=None):
1765 """get a file context from the working directory
1767 """get a file context from the working directory
1766
1768
1767 Returns None if file doesn't exist and should be removed."""
1769 Returns None if file doesn't exist and should be removed."""
1768 return self._filectxfn(self._repo, self, path)
1770 return self._filectxfn(self._repo, self, path)
1769
1771
1770 def commit(self):
1772 def commit(self):
1771 """commit context to the repo"""
1773 """commit context to the repo"""
1772 return self._repo.commitctx(self)
1774 return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].node()
                if len(p) > 1:
                    p2node = p[1].node()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

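    # For orientation: "revlog.hash" above computes the standard Mercurial
    # nodeid, i.e. sha1 over the two parent nodeids (sorted) followed by the
    # file text.  A simplified standalone sketch of the idea, not a substitute
    # for revlog.hash:
    #
    #   import hashlib
    #   def nodeid(text, p1, p2):
    #       s = hashlib.sha1(min(p1, p2))
    #       s.update(max(p1, p2))
    #       s.update(text)
    #       return s.digest()
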
    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized as a two-element list.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

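        # scmutil.status is a seven-field tuple; only the first three matter
        # for an in-memory commit:
        #   (modified, added, removed, deleted, unknown, ignored, clean)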
        return scmutil.status(modified, added, removed, [], [], [], [])

class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

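    # Illustrative sketch of the "copied" argument: to record a rename of
    # "old.txt" to "new.txt", list both paths in memctx's "files" and have the
    # filectxfn return None for the old path (removal) and a copied memfilectx
    # for the new one.  Names below are hypothetical:
    #
    #   def getfilectx(repo, memctx, path):
    #       if path == 'old.txt':
    #           return None
    #       return memfilectx(repo, path, newdata, copied='old.txt',
    #                         memctx=memctx)
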
    def data(self):
        return self._data
    def size(self):
        return len(self.data())
    def flags(self):
        return self._flags
    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
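        # note: "flags" is accepted for interface compatibility with other
        # file contexts but is not stored here; only the data is replaced.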