##// END OF EJS Templates
filectx.parents: also fetch the filelog of rename source too...
Pierre-Yves David -
r23699:fe17a6fb default
parent child Browse files
Show More
@@ -1,1730 +1,1730 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid, nullrev, short, hex, bin
8 from node import nullid, nullrev, short, hex, bin
9 from i18n import _
9 from i18n import _
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
10 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
11 import match as matchmod
11 import match as matchmod
12 import os, errno, stat
12 import os, errno, stat
13 import obsolete as obsmod
13 import obsolete as obsmod
14 import repoview
14 import repoview
15 import fileset
15 import fileset
16 import revlog
16 import revlog
17
17
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19
19
20 # Phony node value to stand-in for new files in some uses of
20 # Phony node value to stand-in for new files in some uses of
21 # manifests. Manifests support 21-byte hashes for nodes which are
21 # manifests. Manifests support 21-byte hashes for nodes which are
22 # dirty in the working copy.
22 # dirty in the working copy.
23 _newnode = '!' * 21
23 _newnode = '!' * 21
24
24
25 class basectx(object):
25 class basectx(object):
26 """A basectx object represents the common logic for its children:
26 """A basectx object represents the common logic for its children:
27 changectx: read-only context that is already present in the repo,
27 changectx: read-only context that is already present in the repo,
28 workingctx: a context that represents the working directory and can
28 workingctx: a context that represents the working directory and can
29 be committed,
29 be committed,
30 memctx: a context that represents changes in-memory and can also
30 memctx: a context that represents changes in-memory and can also
31 be committed."""
31 be committed."""
32 def __new__(cls, repo, changeid='', *args, **kwargs):
32 def __new__(cls, repo, changeid='', *args, **kwargs):
33 if isinstance(changeid, basectx):
33 if isinstance(changeid, basectx):
34 return changeid
34 return changeid
35
35
36 o = super(basectx, cls).__new__(cls)
36 o = super(basectx, cls).__new__(cls)
37
37
38 o._repo = repo
38 o._repo = repo
39 o._rev = nullrev
39 o._rev = nullrev
40 o._node = nullid
40 o._node = nullid
41
41
42 return o
42 return o
43
43
44 def __str__(self):
44 def __str__(self):
45 return short(self.node())
45 return short(self.node())
46
46
47 def __int__(self):
47 def __int__(self):
48 return self.rev()
48 return self.rev()
49
49
50 def __repr__(self):
50 def __repr__(self):
51 return "<%s %s>" % (type(self).__name__, str(self))
51 return "<%s %s>" % (type(self).__name__, str(self))
52
52
53 def __eq__(self, other):
53 def __eq__(self, other):
54 try:
54 try:
55 return type(self) == type(other) and self._rev == other._rev
55 return type(self) == type(other) and self._rev == other._rev
56 except AttributeError:
56 except AttributeError:
57 return False
57 return False
58
58
59 def __ne__(self, other):
59 def __ne__(self, other):
60 return not (self == other)
60 return not (self == other)
61
61
62 def __contains__(self, key):
62 def __contains__(self, key):
63 return key in self._manifest
63 return key in self._manifest
64
64
65 def __getitem__(self, key):
65 def __getitem__(self, key):
66 return self.filectx(key)
66 return self.filectx(key)
67
67
68 def __iter__(self):
68 def __iter__(self):
69 for f in sorted(self._manifest):
69 for f in sorted(self._manifest):
70 yield f
70 yield f
71
71
72 def _manifestmatches(self, match, s):
72 def _manifestmatches(self, match, s):
73 """generate a new manifest filtered by the match argument
73 """generate a new manifest filtered by the match argument
74
74
75 This method is for internal use only and mainly exists to provide an
75 This method is for internal use only and mainly exists to provide an
76 object oriented way for other contexts to customize the manifest
76 object oriented way for other contexts to customize the manifest
77 generation.
77 generation.
78 """
78 """
79 return self.manifest().matches(match)
79 return self.manifest().matches(match)
80
80
81 def _matchstatus(self, other, match):
81 def _matchstatus(self, other, match):
82 """return match.always if match is none
82 """return match.always if match is none
83
83
84 This internal method provides a way for child objects to override the
84 This internal method provides a way for child objects to override the
85 match operator.
85 match operator.
86 """
86 """
87 return match or matchmod.always(self._repo.root, self._repo.getcwd())
87 return match or matchmod.always(self._repo.root, self._repo.getcwd())
88
88
89 def _buildstatus(self, other, s, match, listignored, listclean,
89 def _buildstatus(self, other, s, match, listignored, listclean,
90 listunknown):
90 listunknown):
91 """build a status with respect to another context"""
91 """build a status with respect to another context"""
92 # Load earliest manifest first for caching reasons. More specifically,
92 # Load earliest manifest first for caching reasons. More specifically,
93 # if you have revisions 1000 and 1001, 1001 is probably stored as a
93 # if you have revisions 1000 and 1001, 1001 is probably stored as a
94 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
94 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
95 # 1000 and cache it so that when you read 1001, we just need to apply a
95 # 1000 and cache it so that when you read 1001, we just need to apply a
96 # delta to what's in the cache. So that's one full reconstruction + one
96 # delta to what's in the cache. So that's one full reconstruction + one
97 # delta application.
97 # delta application.
98 if self.rev() is not None and self.rev() < other.rev():
98 if self.rev() is not None and self.rev() < other.rev():
99 self.manifest()
99 self.manifest()
100 mf1 = other._manifestmatches(match, s)
100 mf1 = other._manifestmatches(match, s)
101 mf2 = self._manifestmatches(match, s)
101 mf2 = self._manifestmatches(match, s)
102
102
103 modified, added, clean = [], [], []
103 modified, added, clean = [], [], []
104 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
104 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
105 deletedset = set(deleted)
105 deletedset = set(deleted)
106 withflags = mf1.withflags() | mf2.withflags()
106 withflags = mf1.withflags() | mf2.withflags()
107 for fn, mf2node in mf2.iteritems():
107 for fn, mf2node in mf2.iteritems():
108 if fn in mf1:
108 if fn in mf1:
109 if (fn not in deletedset and
109 if (fn not in deletedset and
110 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
110 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
111 (mf1[fn] != mf2node and
111 (mf1[fn] != mf2node and
112 (mf2node != _newnode or self[fn].cmp(other[fn]))))):
112 (mf2node != _newnode or self[fn].cmp(other[fn]))))):
113 modified.append(fn)
113 modified.append(fn)
114 elif listclean:
114 elif listclean:
115 clean.append(fn)
115 clean.append(fn)
116 del mf1[fn]
116 del mf1[fn]
117 elif fn not in deletedset:
117 elif fn not in deletedset:
118 added.append(fn)
118 added.append(fn)
119 removed = mf1.keys()
119 removed = mf1.keys()
120 if removed:
120 if removed:
121 # need to filter files if they are already reported as removed
121 # need to filter files if they are already reported as removed
122 unknown = [fn for fn in unknown if fn not in mf1]
122 unknown = [fn for fn in unknown if fn not in mf1]
123 ignored = [fn for fn in ignored if fn not in mf1]
123 ignored = [fn for fn in ignored if fn not in mf1]
124
124
125 return scmutil.status(modified, added, removed, deleted, unknown,
125 return scmutil.status(modified, added, removed, deleted, unknown,
126 ignored, clean)
126 ignored, clean)
127
127
128 @propertycache
128 @propertycache
129 def substate(self):
129 def substate(self):
130 return subrepo.state(self, self._repo.ui)
130 return subrepo.state(self, self._repo.ui)
131
131
132 def subrev(self, subpath):
132 def subrev(self, subpath):
133 return self.substate[subpath][1]
133 return self.substate[subpath][1]
134
134
135 def rev(self):
135 def rev(self):
136 return self._rev
136 return self._rev
137 def node(self):
137 def node(self):
138 return self._node
138 return self._node
139 def hex(self):
139 def hex(self):
140 return hex(self.node())
140 return hex(self.node())
141 def manifest(self):
141 def manifest(self):
142 return self._manifest
142 return self._manifest
143 def phasestr(self):
143 def phasestr(self):
144 return phases.phasenames[self.phase()]
144 return phases.phasenames[self.phase()]
145 def mutable(self):
145 def mutable(self):
146 return self.phase() > phases.public
146 return self.phase() > phases.public
147
147
148 def getfileset(self, expr):
148 def getfileset(self, expr):
149 return fileset.getfileset(self, expr)
149 return fileset.getfileset(self, expr)
150
150
151 def obsolete(self):
151 def obsolete(self):
152 """True if the changeset is obsolete"""
152 """True if the changeset is obsolete"""
153 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
153 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
154
154
155 def extinct(self):
155 def extinct(self):
156 """True if the changeset is extinct"""
156 """True if the changeset is extinct"""
157 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
157 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
158
158
159 def unstable(self):
159 def unstable(self):
160 """True if the changeset is not obsolete but it's ancestor are"""
160 """True if the changeset is not obsolete but it's ancestor are"""
161 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
161 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
162
162
163 def bumped(self):
163 def bumped(self):
164 """True if the changeset try to be a successor of a public changeset
164 """True if the changeset try to be a successor of a public changeset
165
165
166 Only non-public and non-obsolete changesets may be bumped.
166 Only non-public and non-obsolete changesets may be bumped.
167 """
167 """
168 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
168 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
169
169
170 def divergent(self):
170 def divergent(self):
171 """Is a successors of a changeset with multiple possible successors set
171 """Is a successors of a changeset with multiple possible successors set
172
172
173 Only non-public and non-obsolete changesets may be divergent.
173 Only non-public and non-obsolete changesets may be divergent.
174 """
174 """
175 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
175 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
176
176
177 def troubled(self):
177 def troubled(self):
178 """True if the changeset is either unstable, bumped or divergent"""
178 """True if the changeset is either unstable, bumped or divergent"""
179 return self.unstable() or self.bumped() or self.divergent()
179 return self.unstable() or self.bumped() or self.divergent()
180
180
181 def troubles(self):
181 def troubles(self):
182 """return the list of troubles affecting this changesets.
182 """return the list of troubles affecting this changesets.
183
183
184 Troubles are returned as strings. possible values are:
184 Troubles are returned as strings. possible values are:
185 - unstable,
185 - unstable,
186 - bumped,
186 - bumped,
187 - divergent.
187 - divergent.
188 """
188 """
189 troubles = []
189 troubles = []
190 if self.unstable():
190 if self.unstable():
191 troubles.append('unstable')
191 troubles.append('unstable')
192 if self.bumped():
192 if self.bumped():
193 troubles.append('bumped')
193 troubles.append('bumped')
194 if self.divergent():
194 if self.divergent():
195 troubles.append('divergent')
195 troubles.append('divergent')
196 return troubles
196 return troubles
197
197
198 def parents(self):
198 def parents(self):
199 """return contexts for each parent changeset"""
199 """return contexts for each parent changeset"""
200 return self._parents
200 return self._parents
201
201
202 def p1(self):
202 def p1(self):
203 return self._parents[0]
203 return self._parents[0]
204
204
205 def p2(self):
205 def p2(self):
206 if len(self._parents) == 2:
206 if len(self._parents) == 2:
207 return self._parents[1]
207 return self._parents[1]
208 return changectx(self._repo, -1)
208 return changectx(self._repo, -1)
209
209
210 def _fileinfo(self, path):
210 def _fileinfo(self, path):
211 if '_manifest' in self.__dict__:
211 if '_manifest' in self.__dict__:
212 try:
212 try:
213 return self._manifest[path], self._manifest.flags(path)
213 return self._manifest[path], self._manifest.flags(path)
214 except KeyError:
214 except KeyError:
215 raise error.ManifestLookupError(self._node, path,
215 raise error.ManifestLookupError(self._node, path,
216 _('not found in manifest'))
216 _('not found in manifest'))
217 if '_manifestdelta' in self.__dict__ or path in self.files():
217 if '_manifestdelta' in self.__dict__ or path in self.files():
218 if path in self._manifestdelta:
218 if path in self._manifestdelta:
219 return (self._manifestdelta[path],
219 return (self._manifestdelta[path],
220 self._manifestdelta.flags(path))
220 self._manifestdelta.flags(path))
221 node, flag = self._repo.manifest.find(self._changeset[0], path)
221 node, flag = self._repo.manifest.find(self._changeset[0], path)
222 if not node:
222 if not node:
223 raise error.ManifestLookupError(self._node, path,
223 raise error.ManifestLookupError(self._node, path,
224 _('not found in manifest'))
224 _('not found in manifest'))
225
225
226 return node, flag
226 return node, flag
227
227
228 def filenode(self, path):
228 def filenode(self, path):
229 return self._fileinfo(path)[0]
229 return self._fileinfo(path)[0]
230
230
231 def flags(self, path):
231 def flags(self, path):
232 try:
232 try:
233 return self._fileinfo(path)[1]
233 return self._fileinfo(path)[1]
234 except error.LookupError:
234 except error.LookupError:
235 return ''
235 return ''
236
236
237 def sub(self, path):
237 def sub(self, path):
238 return subrepo.subrepo(self, path)
238 return subrepo.subrepo(self, path)
239
239
240 def match(self, pats=[], include=None, exclude=None, default='glob'):
240 def match(self, pats=[], include=None, exclude=None, default='glob'):
241 r = self._repo
241 r = self._repo
242 return matchmod.match(r.root, r.getcwd(), pats,
242 return matchmod.match(r.root, r.getcwd(), pats,
243 include, exclude, default,
243 include, exclude, default,
244 auditor=r.auditor, ctx=self)
244 auditor=r.auditor, ctx=self)
245
245
246 def diff(self, ctx2=None, match=None, **opts):
246 def diff(self, ctx2=None, match=None, **opts):
247 """Returns a diff generator for the given contexts and matcher"""
247 """Returns a diff generator for the given contexts and matcher"""
248 if ctx2 is None:
248 if ctx2 is None:
249 ctx2 = self.p1()
249 ctx2 = self.p1()
250 if ctx2 is not None:
250 if ctx2 is not None:
251 ctx2 = self._repo[ctx2]
251 ctx2 = self._repo[ctx2]
252 diffopts = patch.diffopts(self._repo.ui, opts)
252 diffopts = patch.diffopts(self._repo.ui, opts)
253 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
253 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
254
254
255 @propertycache
255 @propertycache
256 def _dirs(self):
256 def _dirs(self):
257 return scmutil.dirs(self._manifest)
257 return scmutil.dirs(self._manifest)
258
258
259 def dirs(self):
259 def dirs(self):
260 return self._dirs
260 return self._dirs
261
261
262 def dirty(self, missing=False, merge=True, branch=True):
262 def dirty(self, missing=False, merge=True, branch=True):
263 return False
263 return False
264
264
265 def status(self, other=None, match=None, listignored=False,
265 def status(self, other=None, match=None, listignored=False,
266 listclean=False, listunknown=False, listsubrepos=False):
266 listclean=False, listunknown=False, listsubrepos=False):
267 """return status of files between two nodes or node and working
267 """return status of files between two nodes or node and working
268 directory.
268 directory.
269
269
270 If other is None, compare this node with working directory.
270 If other is None, compare this node with working directory.
271
271
272 returns (modified, added, removed, deleted, unknown, ignored, clean)
272 returns (modified, added, removed, deleted, unknown, ignored, clean)
273 """
273 """
274
274
275 ctx1 = self
275 ctx1 = self
276 ctx2 = self._repo[other]
276 ctx2 = self._repo[other]
277
277
278 # This next code block is, admittedly, fragile logic that tests for
278 # This next code block is, admittedly, fragile logic that tests for
279 # reversing the contexts and wouldn't need to exist if it weren't for
279 # reversing the contexts and wouldn't need to exist if it weren't for
280 # the fast (and common) code path of comparing the working directory
280 # the fast (and common) code path of comparing the working directory
281 # with its first parent.
281 # with its first parent.
282 #
282 #
283 # What we're aiming for here is the ability to call:
283 # What we're aiming for here is the ability to call:
284 #
284 #
285 # workingctx.status(parentctx)
285 # workingctx.status(parentctx)
286 #
286 #
287 # If we always built the manifest for each context and compared those,
287 # If we always built the manifest for each context and compared those,
288 # then we'd be done. But the special case of the above call means we
288 # then we'd be done. But the special case of the above call means we
289 # just copy the manifest of the parent.
289 # just copy the manifest of the parent.
290 reversed = False
290 reversed = False
291 if (not isinstance(ctx1, changectx)
291 if (not isinstance(ctx1, changectx)
292 and isinstance(ctx2, changectx)):
292 and isinstance(ctx2, changectx)):
293 reversed = True
293 reversed = True
294 ctx1, ctx2 = ctx2, ctx1
294 ctx1, ctx2 = ctx2, ctx1
295
295
296 match = ctx2._matchstatus(ctx1, match)
296 match = ctx2._matchstatus(ctx1, match)
297 r = scmutil.status([], [], [], [], [], [], [])
297 r = scmutil.status([], [], [], [], [], [], [])
298 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
298 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
299 listunknown)
299 listunknown)
300
300
301 if reversed:
301 if reversed:
302 # Reverse added and removed. Clear deleted, unknown and ignored as
302 # Reverse added and removed. Clear deleted, unknown and ignored as
303 # these make no sense to reverse.
303 # these make no sense to reverse.
304 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
304 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
305 r.clean)
305 r.clean)
306
306
307 if listsubrepos:
307 if listsubrepos:
308 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
308 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
309 rev2 = ctx2.subrev(subpath)
309 rev2 = ctx2.subrev(subpath)
310 try:
310 try:
311 submatch = matchmod.narrowmatcher(subpath, match)
311 submatch = matchmod.narrowmatcher(subpath, match)
312 s = sub.status(rev2, match=submatch, ignored=listignored,
312 s = sub.status(rev2, match=submatch, ignored=listignored,
313 clean=listclean, unknown=listunknown,
313 clean=listclean, unknown=listunknown,
314 listsubrepos=True)
314 listsubrepos=True)
315 for rfiles, sfiles in zip(r, s):
315 for rfiles, sfiles in zip(r, s):
316 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
316 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
317 except error.LookupError:
317 except error.LookupError:
318 self._repo.ui.status(_("skipping missing "
318 self._repo.ui.status(_("skipping missing "
319 "subrepository: %s\n") % subpath)
319 "subrepository: %s\n") % subpath)
320
320
321 for l in r:
321 for l in r:
322 l.sort()
322 l.sort()
323
323
324 return r
324 return r
325
325
326
326
327 def makememctx(repo, parents, text, user, date, branch, files, store,
327 def makememctx(repo, parents, text, user, date, branch, files, store,
328 editor=None):
328 editor=None):
329 def getfilectx(repo, memctx, path):
329 def getfilectx(repo, memctx, path):
330 data, mode, copied = store.getfile(path)
330 data, mode, copied = store.getfile(path)
331 if data is None:
331 if data is None:
332 return None
332 return None
333 islink, isexec = mode
333 islink, isexec = mode
334 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
334 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
335 copied=copied, memctx=memctx)
335 copied=copied, memctx=memctx)
336 extra = {}
336 extra = {}
337 if branch:
337 if branch:
338 extra['branch'] = encoding.fromlocal(branch)
338 extra['branch'] = encoding.fromlocal(branch)
339 ctx = memctx(repo, parents, text, files, getfilectx, user,
339 ctx = memctx(repo, parents, text, files, getfilectx, user,
340 date, extra, editor)
340 date, extra, editor)
341 return ctx
341 return ctx
342
342
343 class changectx(basectx):
343 class changectx(basectx):
344 """A changecontext object makes access to data related to a particular
344 """A changecontext object makes access to data related to a particular
345 changeset convenient. It represents a read-only context already present in
345 changeset convenient. It represents a read-only context already present in
346 the repo."""
346 the repo."""
347 def __init__(self, repo, changeid=''):
347 def __init__(self, repo, changeid=''):
348 """changeid is a revision number, node, or tag"""
348 """changeid is a revision number, node, or tag"""
349
349
350 # since basectx.__new__ already took care of copying the object, we
350 # since basectx.__new__ already took care of copying the object, we
351 # don't need to do anything in __init__, so we just exit here
351 # don't need to do anything in __init__, so we just exit here
352 if isinstance(changeid, basectx):
352 if isinstance(changeid, basectx):
353 return
353 return
354
354
355 if changeid == '':
355 if changeid == '':
356 changeid = '.'
356 changeid = '.'
357 self._repo = repo
357 self._repo = repo
358
358
359 try:
359 try:
360 if isinstance(changeid, int):
360 if isinstance(changeid, int):
361 self._node = repo.changelog.node(changeid)
361 self._node = repo.changelog.node(changeid)
362 self._rev = changeid
362 self._rev = changeid
363 return
363 return
364 if isinstance(changeid, long):
364 if isinstance(changeid, long):
365 changeid = str(changeid)
365 changeid = str(changeid)
366 if changeid == '.':
366 if changeid == '.':
367 self._node = repo.dirstate.p1()
367 self._node = repo.dirstate.p1()
368 self._rev = repo.changelog.rev(self._node)
368 self._rev = repo.changelog.rev(self._node)
369 return
369 return
370 if changeid == 'null':
370 if changeid == 'null':
371 self._node = nullid
371 self._node = nullid
372 self._rev = nullrev
372 self._rev = nullrev
373 return
373 return
374 if changeid == 'tip':
374 if changeid == 'tip':
375 self._node = repo.changelog.tip()
375 self._node = repo.changelog.tip()
376 self._rev = repo.changelog.rev(self._node)
376 self._rev = repo.changelog.rev(self._node)
377 return
377 return
378 if len(changeid) == 20:
378 if len(changeid) == 20:
379 try:
379 try:
380 self._node = changeid
380 self._node = changeid
381 self._rev = repo.changelog.rev(changeid)
381 self._rev = repo.changelog.rev(changeid)
382 return
382 return
383 except error.FilteredRepoLookupError:
383 except error.FilteredRepoLookupError:
384 raise
384 raise
385 except LookupError:
385 except LookupError:
386 pass
386 pass
387
387
388 try:
388 try:
389 r = int(changeid)
389 r = int(changeid)
390 if str(r) != changeid:
390 if str(r) != changeid:
391 raise ValueError
391 raise ValueError
392 l = len(repo.changelog)
392 l = len(repo.changelog)
393 if r < 0:
393 if r < 0:
394 r += l
394 r += l
395 if r < 0 or r >= l:
395 if r < 0 or r >= l:
396 raise ValueError
396 raise ValueError
397 self._rev = r
397 self._rev = r
398 self._node = repo.changelog.node(r)
398 self._node = repo.changelog.node(r)
399 return
399 return
400 except error.FilteredIndexError:
400 except error.FilteredIndexError:
401 raise
401 raise
402 except (ValueError, OverflowError, IndexError):
402 except (ValueError, OverflowError, IndexError):
403 pass
403 pass
404
404
405 if len(changeid) == 40:
405 if len(changeid) == 40:
406 try:
406 try:
407 self._node = bin(changeid)
407 self._node = bin(changeid)
408 self._rev = repo.changelog.rev(self._node)
408 self._rev = repo.changelog.rev(self._node)
409 return
409 return
410 except error.FilteredLookupError:
410 except error.FilteredLookupError:
411 raise
411 raise
412 except (TypeError, LookupError):
412 except (TypeError, LookupError):
413 pass
413 pass
414
414
415 # lookup bookmarks through the name interface
415 # lookup bookmarks through the name interface
416 try:
416 try:
417 self._node = repo.names.singlenode(repo, changeid)
417 self._node = repo.names.singlenode(repo, changeid)
418 self._rev = repo.changelog.rev(self._node)
418 self._rev = repo.changelog.rev(self._node)
419 return
419 return
420 except KeyError:
420 except KeyError:
421 pass
421 pass
422 except error.FilteredRepoLookupError:
422 except error.FilteredRepoLookupError:
423 raise
423 raise
424 except error.RepoLookupError:
424 except error.RepoLookupError:
425 pass
425 pass
426
426
427 self._node = repo.unfiltered().changelog._partialmatch(changeid)
427 self._node = repo.unfiltered().changelog._partialmatch(changeid)
428 if self._node is not None:
428 if self._node is not None:
429 self._rev = repo.changelog.rev(self._node)
429 self._rev = repo.changelog.rev(self._node)
430 return
430 return
431
431
432 # lookup failed
432 # lookup failed
433 # check if it might have come from damaged dirstate
433 # check if it might have come from damaged dirstate
434 #
434 #
435 # XXX we could avoid the unfiltered if we had a recognizable
435 # XXX we could avoid the unfiltered if we had a recognizable
436 # exception for filtered changeset access
436 # exception for filtered changeset access
437 if changeid in repo.unfiltered().dirstate.parents():
437 if changeid in repo.unfiltered().dirstate.parents():
438 msg = _("working directory has unknown parent '%s'!")
438 msg = _("working directory has unknown parent '%s'!")
439 raise error.Abort(msg % short(changeid))
439 raise error.Abort(msg % short(changeid))
440 try:
440 try:
441 if len(changeid) == 20:
441 if len(changeid) == 20:
442 changeid = hex(changeid)
442 changeid = hex(changeid)
443 except TypeError:
443 except TypeError:
444 pass
444 pass
445 except (error.FilteredIndexError, error.FilteredLookupError,
445 except (error.FilteredIndexError, error.FilteredLookupError,
446 error.FilteredRepoLookupError):
446 error.FilteredRepoLookupError):
447 if repo.filtername == 'visible':
447 if repo.filtername == 'visible':
448 msg = _("hidden revision '%s'") % changeid
448 msg = _("hidden revision '%s'") % changeid
449 hint = _('use --hidden to access hidden revisions')
449 hint = _('use --hidden to access hidden revisions')
450 raise error.FilteredRepoLookupError(msg, hint=hint)
450 raise error.FilteredRepoLookupError(msg, hint=hint)
451 msg = _("filtered revision '%s' (not in '%s' subset)")
451 msg = _("filtered revision '%s' (not in '%s' subset)")
452 msg %= (changeid, repo.filtername)
452 msg %= (changeid, repo.filtername)
453 raise error.FilteredRepoLookupError(msg)
453 raise error.FilteredRepoLookupError(msg)
454 except IndexError:
454 except IndexError:
455 pass
455 pass
456 raise error.RepoLookupError(
456 raise error.RepoLookupError(
457 _("unknown revision '%s'") % changeid)
457 _("unknown revision '%s'") % changeid)
458
458
459 def __hash__(self):
459 def __hash__(self):
460 try:
460 try:
461 return hash(self._rev)
461 return hash(self._rev)
462 except AttributeError:
462 except AttributeError:
463 return id(self)
463 return id(self)
464
464
465 def __nonzero__(self):
465 def __nonzero__(self):
466 return self._rev != nullrev
466 return self._rev != nullrev
467
467
468 @propertycache
468 @propertycache
469 def _changeset(self):
469 def _changeset(self):
470 return self._repo.changelog.read(self.rev())
470 return self._repo.changelog.read(self.rev())
471
471
472 @propertycache
472 @propertycache
473 def _manifest(self):
473 def _manifest(self):
474 return self._repo.manifest.read(self._changeset[0])
474 return self._repo.manifest.read(self._changeset[0])
475
475
476 @propertycache
476 @propertycache
477 def _manifestdelta(self):
477 def _manifestdelta(self):
478 return self._repo.manifest.readdelta(self._changeset[0])
478 return self._repo.manifest.readdelta(self._changeset[0])
479
479
480 @propertycache
480 @propertycache
481 def _parents(self):
481 def _parents(self):
482 p = self._repo.changelog.parentrevs(self._rev)
482 p = self._repo.changelog.parentrevs(self._rev)
483 if p[1] == nullrev:
483 if p[1] == nullrev:
484 p = p[:-1]
484 p = p[:-1]
485 return [changectx(self._repo, x) for x in p]
485 return [changectx(self._repo, x) for x in p]
486
486
487 def changeset(self):
487 def changeset(self):
488 return self._changeset
488 return self._changeset
489 def manifestnode(self):
489 def manifestnode(self):
490 return self._changeset[0]
490 return self._changeset[0]
491
491
492 def user(self):
492 def user(self):
493 return self._changeset[1]
493 return self._changeset[1]
494 def date(self):
494 def date(self):
495 return self._changeset[2]
495 return self._changeset[2]
496 def files(self):
496 def files(self):
497 return self._changeset[3]
497 return self._changeset[3]
498 def description(self):
498 def description(self):
499 return self._changeset[4]
499 return self._changeset[4]
500 def branch(self):
500 def branch(self):
501 return encoding.tolocal(self._changeset[5].get("branch"))
501 return encoding.tolocal(self._changeset[5].get("branch"))
502 def closesbranch(self):
502 def closesbranch(self):
503 return 'close' in self._changeset[5]
503 return 'close' in self._changeset[5]
504 def extra(self):
504 def extra(self):
505 return self._changeset[5]
505 return self._changeset[5]
506 def tags(self):
506 def tags(self):
507 return self._repo.nodetags(self._node)
507 return self._repo.nodetags(self._node)
508 def bookmarks(self):
508 def bookmarks(self):
509 return self._repo.nodebookmarks(self._node)
509 return self._repo.nodebookmarks(self._node)
510 def phase(self):
510 def phase(self):
511 return self._repo._phasecache.phase(self._repo, self._rev)
511 return self._repo._phasecache.phase(self._repo, self._rev)
512 def hidden(self):
512 def hidden(self):
513 return self._rev in repoview.filterrevs(self._repo, 'visible')
513 return self._rev in repoview.filterrevs(self._repo, 'visible')
514
514
515 def children(self):
515 def children(self):
516 """return contexts for each child changeset"""
516 """return contexts for each child changeset"""
517 c = self._repo.changelog.children(self._node)
517 c = self._repo.changelog.children(self._node)
518 return [changectx(self._repo, x) for x in c]
518 return [changectx(self._repo, x) for x in c]
519
519
520 def ancestors(self):
520 def ancestors(self):
521 for a in self._repo.changelog.ancestors([self._rev]):
521 for a in self._repo.changelog.ancestors([self._rev]):
522 yield changectx(self._repo, a)
522 yield changectx(self._repo, a)
523
523
524 def descendants(self):
524 def descendants(self):
525 for d in self._repo.changelog.descendants([self._rev]):
525 for d in self._repo.changelog.descendants([self._rev]):
526 yield changectx(self._repo, d)
526 yield changectx(self._repo, d)
527
527
528 def filectx(self, path, fileid=None, filelog=None):
528 def filectx(self, path, fileid=None, filelog=None):
529 """get a file context from this changeset"""
529 """get a file context from this changeset"""
530 if fileid is None:
530 if fileid is None:
531 fileid = self.filenode(path)
531 fileid = self.filenode(path)
532 return filectx(self._repo, path, fileid=fileid,
532 return filectx(self._repo, path, fileid=fileid,
533 changectx=self, filelog=filelog)
533 changectx=self, filelog=filelog)
534
534
535 def ancestor(self, c2, warn=False):
535 def ancestor(self, c2, warn=False):
536 """return the "best" ancestor context of self and c2
536 """return the "best" ancestor context of self and c2
537
537
538 If there are multiple candidates, it will show a message and check
538 If there are multiple candidates, it will show a message and check
539 merge.preferancestor configuration before falling back to the
539 merge.preferancestor configuration before falling back to the
540 revlog ancestor."""
540 revlog ancestor."""
541 # deal with workingctxs
541 # deal with workingctxs
542 n2 = c2._node
542 n2 = c2._node
543 if n2 is None:
543 if n2 is None:
544 n2 = c2._parents[0]._node
544 n2 = c2._parents[0]._node
545 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
545 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
546 if not cahs:
546 if not cahs:
547 anc = nullid
547 anc = nullid
548 elif len(cahs) == 1:
548 elif len(cahs) == 1:
549 anc = cahs[0]
549 anc = cahs[0]
550 else:
550 else:
551 for r in self._repo.ui.configlist('merge', 'preferancestor'):
551 for r in self._repo.ui.configlist('merge', 'preferancestor'):
552 try:
552 try:
553 ctx = changectx(self._repo, r)
553 ctx = changectx(self._repo, r)
554 except error.RepoLookupError:
554 except error.RepoLookupError:
555 continue
555 continue
556 anc = ctx.node()
556 anc = ctx.node()
557 if anc in cahs:
557 if anc in cahs:
558 break
558 break
559 else:
559 else:
560 anc = self._repo.changelog.ancestor(self._node, n2)
560 anc = self._repo.changelog.ancestor(self._node, n2)
561 if warn:
561 if warn:
562 self._repo.ui.status(
562 self._repo.ui.status(
563 (_("note: using %s as ancestor of %s and %s\n") %
563 (_("note: using %s as ancestor of %s and %s\n") %
564 (short(anc), short(self._node), short(n2))) +
564 (short(anc), short(self._node), short(n2))) +
565 ''.join(_(" alternatively, use --config "
565 ''.join(_(" alternatively, use --config "
566 "merge.preferancestor=%s\n") %
566 "merge.preferancestor=%s\n") %
567 short(n) for n in sorted(cahs) if n != anc))
567 short(n) for n in sorted(cahs) if n != anc))
568 return changectx(self._repo, anc)
568 return changectx(self._repo, anc)
569
569
570 def descendant(self, other):
570 def descendant(self, other):
571 """True if other is descendant of this changeset"""
571 """True if other is descendant of this changeset"""
572 return self._repo.changelog.descendant(self._rev, other._rev)
572 return self._repo.changelog.descendant(self._rev, other._rev)
573
573
574 def walk(self, match):
574 def walk(self, match):
575 fset = set(match.files())
575 fset = set(match.files())
576 # for dirstate.walk, files=['.'] means "walk the whole tree".
576 # for dirstate.walk, files=['.'] means "walk the whole tree".
577 # follow that here, too
577 # follow that here, too
578 fset.discard('.')
578 fset.discard('.')
579
579
580 # avoid the entire walk if we're only looking for specific files
580 # avoid the entire walk if we're only looking for specific files
581 if fset and not match.anypats():
581 if fset and not match.anypats():
582 if util.all([fn in self for fn in fset]):
582 if util.all([fn in self for fn in fset]):
583 for fn in sorted(fset):
583 for fn in sorted(fset):
584 if match(fn):
584 if match(fn):
585 yield fn
585 yield fn
586 raise StopIteration
586 raise StopIteration
587
587
588 for fn in self:
588 for fn in self:
589 if fn in fset:
589 if fn in fset:
590 # specified pattern is the exact name
590 # specified pattern is the exact name
591 fset.remove(fn)
591 fset.remove(fn)
592 if match(fn):
592 if match(fn):
593 yield fn
593 yield fn
594 for fn in sorted(fset):
594 for fn in sorted(fset):
595 if fn in self._dirs:
595 if fn in self._dirs:
596 # specified pattern is a directory
596 # specified pattern is a directory
597 continue
597 continue
598 match.bad(fn, _('no such file in rev %s') % self)
598 match.bad(fn, _('no such file in rev %s') % self)
599
599
600 def matches(self, match):
600 def matches(self, match):
601 return self.walk(match)
601 return self.walk(match)
602
602
603 class basefilectx(object):
603 class basefilectx(object):
604 """A filecontext object represents the common logic for its children:
604 """A filecontext object represents the common logic for its children:
605 filectx: read-only access to a filerevision that is already present
605 filectx: read-only access to a filerevision that is already present
606 in the repo,
606 in the repo,
607 workingfilectx: a filecontext that represents files from the working
607 workingfilectx: a filecontext that represents files from the working
608 directory,
608 directory,
609 memfilectx: a filecontext that represents files in-memory."""
609 memfilectx: a filecontext that represents files in-memory."""
610 def __new__(cls, repo, path, *args, **kwargs):
610 def __new__(cls, repo, path, *args, **kwargs):
611 return super(basefilectx, cls).__new__(cls)
611 return super(basefilectx, cls).__new__(cls)
612
612
613 @propertycache
613 @propertycache
614 def _filelog(self):
614 def _filelog(self):
615 return self._repo.file(self._path)
615 return self._repo.file(self._path)
616
616
617 @propertycache
617 @propertycache
618 def _changeid(self):
618 def _changeid(self):
619 if '_changeid' in self.__dict__:
619 if '_changeid' in self.__dict__:
620 return self._changeid
620 return self._changeid
621 elif '_changectx' in self.__dict__:
621 elif '_changectx' in self.__dict__:
622 return self._changectx.rev()
622 return self._changectx.rev()
623 else:
623 else:
624 return self._filelog.linkrev(self._filerev)
624 return self._filelog.linkrev(self._filerev)
625
625
626 @propertycache
626 @propertycache
627 def _filenode(self):
627 def _filenode(self):
628 if '_fileid' in self.__dict__:
628 if '_fileid' in self.__dict__:
629 return self._filelog.lookup(self._fileid)
629 return self._filelog.lookup(self._fileid)
630 else:
630 else:
631 return self._changectx.filenode(self._path)
631 return self._changectx.filenode(self._path)
632
632
633 @propertycache
633 @propertycache
634 def _filerev(self):
634 def _filerev(self):
635 return self._filelog.rev(self._filenode)
635 return self._filelog.rev(self._filenode)
636
636
637 @propertycache
637 @propertycache
638 def _repopath(self):
638 def _repopath(self):
639 return self._path
639 return self._path
640
640
641 def __nonzero__(self):
641 def __nonzero__(self):
642 try:
642 try:
643 self._filenode
643 self._filenode
644 return True
644 return True
645 except error.LookupError:
645 except error.LookupError:
646 # file is missing
646 # file is missing
647 return False
647 return False
648
648
649 def __str__(self):
649 def __str__(self):
650 return "%s@%s" % (self.path(), self._changectx)
650 return "%s@%s" % (self.path(), self._changectx)
651
651
652 def __repr__(self):
652 def __repr__(self):
653 return "<%s %s>" % (type(self).__name__, str(self))
653 return "<%s %s>" % (type(self).__name__, str(self))
654
654
655 def __hash__(self):
655 def __hash__(self):
656 try:
656 try:
657 return hash((self._path, self._filenode))
657 return hash((self._path, self._filenode))
658 except AttributeError:
658 except AttributeError:
659 return id(self)
659 return id(self)
660
660
661 def __eq__(self, other):
661 def __eq__(self, other):
662 try:
662 try:
663 return (type(self) == type(other) and self._path == other._path
663 return (type(self) == type(other) and self._path == other._path
664 and self._filenode == other._filenode)
664 and self._filenode == other._filenode)
665 except AttributeError:
665 except AttributeError:
666 return False
666 return False
667
667
668 def __ne__(self, other):
668 def __ne__(self, other):
669 return not (self == other)
669 return not (self == other)
670
670
671 def filerev(self):
671 def filerev(self):
672 return self._filerev
672 return self._filerev
673 def filenode(self):
673 def filenode(self):
674 return self._filenode
674 return self._filenode
675 def flags(self):
675 def flags(self):
676 return self._changectx.flags(self._path)
676 return self._changectx.flags(self._path)
677 def filelog(self):
677 def filelog(self):
678 return self._filelog
678 return self._filelog
679 def rev(self):
679 def rev(self):
680 return self._changeid
680 return self._changeid
681 def linkrev(self):
681 def linkrev(self):
682 return self._filelog.linkrev(self._filerev)
682 return self._filelog.linkrev(self._filerev)
683 def node(self):
683 def node(self):
684 return self._changectx.node()
684 return self._changectx.node()
685 def hex(self):
685 def hex(self):
686 return self._changectx.hex()
686 return self._changectx.hex()
687 def user(self):
687 def user(self):
688 return self._changectx.user()
688 return self._changectx.user()
689 def date(self):
689 def date(self):
690 return self._changectx.date()
690 return self._changectx.date()
691 def files(self):
691 def files(self):
692 return self._changectx.files()
692 return self._changectx.files()
693 def description(self):
693 def description(self):
694 return self._changectx.description()
694 return self._changectx.description()
695 def branch(self):
695 def branch(self):
696 return self._changectx.branch()
696 return self._changectx.branch()
697 def extra(self):
697 def extra(self):
698 return self._changectx.extra()
698 return self._changectx.extra()
699 def phase(self):
699 def phase(self):
700 return self._changectx.phase()
700 return self._changectx.phase()
701 def phasestr(self):
701 def phasestr(self):
702 return self._changectx.phasestr()
702 return self._changectx.phasestr()
703 def manifest(self):
703 def manifest(self):
704 return self._changectx.manifest()
704 return self._changectx.manifest()
705 def changectx(self):
705 def changectx(self):
706 return self._changectx
706 return self._changectx
707
707
708 def path(self):
708 def path(self):
709 return self._path
709 return self._path
710
710
711 def isbinary(self):
711 def isbinary(self):
712 try:
712 try:
713 return util.binary(self.data())
713 return util.binary(self.data())
714 except IOError:
714 except IOError:
715 return False
715 return False
716 def isexec(self):
716 def isexec(self):
717 return 'x' in self.flags()
717 return 'x' in self.flags()
718 def islink(self):
718 def islink(self):
719 return 'l' in self.flags()
719 return 'l' in self.flags()
720
720
721 def cmp(self, fctx):
721 def cmp(self, fctx):
722 """compare with other file context
722 """compare with other file context
723
723
724 returns True if different than fctx.
724 returns True if different than fctx.
725 """
725 """
726 if (fctx._filerev is None
726 if (fctx._filerev is None
727 and (self._repo._encodefilterpats
727 and (self._repo._encodefilterpats
728 # if file data starts with '\1\n', empty metadata block is
728 # if file data starts with '\1\n', empty metadata block is
729 # prepended, which adds 4 bytes to filelog.size().
729 # prepended, which adds 4 bytes to filelog.size().
730 or self.size() - 4 == fctx.size())
730 or self.size() - 4 == fctx.size())
731 or self.size() == fctx.size()):
731 or self.size() == fctx.size()):
732 return self._filelog.cmp(self._filenode, fctx.data())
732 return self._filelog.cmp(self._filenode, fctx.data())
733
733
734 return True
734 return True
735
735
736 def parents(self):
736 def parents(self):
737 _path = self._path
737 _path = self._path
738 fl = self._filelog
738 fl = self._filelog
739 parents = self._filelog.parents(self._filenode)
739 parents = self._filelog.parents(self._filenode)
740 pl = [(_path, node, fl) for node in parents if node != nullid]
740 pl = [(_path, node, fl) for node in parents if node != nullid]
741
741
742 r = self._filelog.renamed(self._filenode)
742 r = self._filelog.renamed(self._filenode)
743 if r:
743 if r:
744 # - In the simple rename case, both parent are nullid, pl is empty.
744 # - In the simple rename case, both parent are nullid, pl is empty.
745 # - In case of merge, only one of the parent is null id and should
745 # - In case of merge, only one of the parent is null id and should
746 # be replaced with the rename information. This parent is -always-
746 # be replaced with the rename information. This parent is -always-
747 # the first one.
747 # the first one.
748 #
748 #
749 # As null id have alway been filtered out in the previous list
749 # As null id have alway been filtered out in the previous list
750 # comprehension, inserting to 0 will always result in "replacing
750 # comprehension, inserting to 0 will always result in "replacing
751 # first nullid parent with rename information.
751 # first nullid parent with rename information.
752 pl.insert(0, (r[0], r[1], None))
752 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
753
753
754 return [filectx(self._repo, p, fileid=n, filelog=l) for p, n, l in pl]
754 return [filectx(self._repo, p, fileid=n, filelog=l) for p, n, l in pl]
755
755
756 def p1(self):
756 def p1(self):
757 return self.parents()[0]
757 return self.parents()[0]
758
758
759 def p2(self):
759 def p2(self):
760 p = self.parents()
760 p = self.parents()
761 if len(p) == 2:
761 if len(p) == 2:
762 return p[1]
762 return p[1]
763 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
763 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
764
764
765 def annotate(self, follow=False, linenumber=None, diffopts=None):
765 def annotate(self, follow=False, linenumber=None, diffopts=None):
766 '''returns a list of tuples of (ctx, line) for each line
766 '''returns a list of tuples of (ctx, line) for each line
767 in the file, where ctx is the filectx of the node where
767 in the file, where ctx is the filectx of the node where
768 that line was last changed.
768 that line was last changed.
769 This returns tuples of ((ctx, linenumber), line) for each line,
769 This returns tuples of ((ctx, linenumber), line) for each line,
770 if "linenumber" parameter is NOT "None".
770 if "linenumber" parameter is NOT "None".
771 In such tuples, linenumber means one at the first appearance
771 In such tuples, linenumber means one at the first appearance
772 in the managed file.
772 in the managed file.
773 To reduce annotation cost,
773 To reduce annotation cost,
774 this returns fixed value(False is used) as linenumber,
774 this returns fixed value(False is used) as linenumber,
775 if "linenumber" parameter is "False".'''
775 if "linenumber" parameter is "False".'''
776
776
777 if linenumber is None:
777 if linenumber is None:
778 def decorate(text, rev):
778 def decorate(text, rev):
779 return ([rev] * len(text.splitlines()), text)
779 return ([rev] * len(text.splitlines()), text)
780 elif linenumber:
780 elif linenumber:
781 def decorate(text, rev):
781 def decorate(text, rev):
782 size = len(text.splitlines())
782 size = len(text.splitlines())
783 return ([(rev, i) for i in xrange(1, size + 1)], text)
783 return ([(rev, i) for i in xrange(1, size + 1)], text)
784 else:
784 else:
785 def decorate(text, rev):
785 def decorate(text, rev):
786 return ([(rev, False)] * len(text.splitlines()), text)
786 return ([(rev, False)] * len(text.splitlines()), text)
787
787
788 def pair(parent, child):
788 def pair(parent, child):
789 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
789 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
790 refine=True)
790 refine=True)
791 for (a1, a2, b1, b2), t in blocks:
791 for (a1, a2, b1, b2), t in blocks:
792 # Changed blocks ('!') or blocks made only of blank lines ('~')
792 # Changed blocks ('!') or blocks made only of blank lines ('~')
793 # belong to the child.
793 # belong to the child.
794 if t == '=':
794 if t == '=':
795 child[0][b1:b2] = parent[0][a1:a2]
795 child[0][b1:b2] = parent[0][a1:a2]
796 return child
796 return child
797
797
798 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
798 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
799
799
800 def parents(f):
800 def parents(f):
801 pl = f.parents()
801 pl = f.parents()
802
802
803 # Don't return renamed parents if we aren't following.
803 # Don't return renamed parents if we aren't following.
804 if not follow:
804 if not follow:
805 pl = [p for p in pl if p.path() == f.path()]
805 pl = [p for p in pl if p.path() == f.path()]
806
806
807 # renamed filectx won't have a filelog yet, so set it
807 # renamed filectx won't have a filelog yet, so set it
808 # from the cache to save time
808 # from the cache to save time
809 for p in pl:
809 for p in pl:
810 if not '_filelog' in p.__dict__:
810 if not '_filelog' in p.__dict__:
811 p._filelog = getlog(p.path())
811 p._filelog = getlog(p.path())
812
812
813 return pl
813 return pl
814
814
815 # use linkrev to find the first changeset where self appeared
815 # use linkrev to find the first changeset where self appeared
816 if self.rev() != self.linkrev():
816 if self.rev() != self.linkrev():
817 base = self.filectx(self.filenode())
817 base = self.filectx(self.filenode())
818 else:
818 else:
819 base = self
819 base = self
820
820
821 # This algorithm would prefer to be recursive, but Python is a
821 # This algorithm would prefer to be recursive, but Python is a
822 # bit recursion-hostile. Instead we do an iterative
822 # bit recursion-hostile. Instead we do an iterative
823 # depth-first search.
823 # depth-first search.
824
824
825 visit = [base]
825 visit = [base]
826 hist = {}
826 hist = {}
827 pcache = {}
827 pcache = {}
828 needed = {base: 1}
828 needed = {base: 1}
829 while visit:
829 while visit:
830 f = visit[-1]
830 f = visit[-1]
831 pcached = f in pcache
831 pcached = f in pcache
832 if not pcached:
832 if not pcached:
833 pcache[f] = parents(f)
833 pcache[f] = parents(f)
834
834
835 ready = True
835 ready = True
836 pl = pcache[f]
836 pl = pcache[f]
837 for p in pl:
837 for p in pl:
838 if p not in hist:
838 if p not in hist:
839 ready = False
839 ready = False
840 visit.append(p)
840 visit.append(p)
841 if not pcached:
841 if not pcached:
842 needed[p] = needed.get(p, 0) + 1
842 needed[p] = needed.get(p, 0) + 1
843 if ready:
843 if ready:
844 visit.pop()
844 visit.pop()
845 reusable = f in hist
845 reusable = f in hist
846 if reusable:
846 if reusable:
847 curr = hist[f]
847 curr = hist[f]
848 else:
848 else:
849 curr = decorate(f.data(), f)
849 curr = decorate(f.data(), f)
850 for p in pl:
850 for p in pl:
851 if not reusable:
851 if not reusable:
852 curr = pair(hist[p], curr)
852 curr = pair(hist[p], curr)
853 if needed[p] == 1:
853 if needed[p] == 1:
854 del hist[p]
854 del hist[p]
855 del needed[p]
855 del needed[p]
856 else:
856 else:
857 needed[p] -= 1
857 needed[p] -= 1
858
858
859 hist[f] = curr
859 hist[f] = curr
860 pcache[f] = []
860 pcache[f] = []
861
861
862 return zip(hist[base][0], hist[base][1].splitlines(True))
862 return zip(hist[base][0], hist[base][1].splitlines(True))
863
863
864 def ancestors(self, followfirst=False):
864 def ancestors(self, followfirst=False):
865 visit = {}
865 visit = {}
866 c = self
866 c = self
867 cut = followfirst and 1 or None
867 cut = followfirst and 1 or None
868 while True:
868 while True:
869 for parent in c.parents()[:cut]:
869 for parent in c.parents()[:cut]:
870 visit[(parent.rev(), parent.node())] = parent
870 visit[(parent.rev(), parent.node())] = parent
871 if not visit:
871 if not visit:
872 break
872 break
873 c = visit.pop(max(visit))
873 c = visit.pop(max(visit))
874 yield c
874 yield c
875
875
876 class filectx(basefilectx):
876 class filectx(basefilectx):
877 """A filecontext object makes access to data related to a particular
877 """A filecontext object makes access to data related to a particular
878 filerevision convenient."""
878 filerevision convenient."""
879 def __init__(self, repo, path, changeid=None, fileid=None,
879 def __init__(self, repo, path, changeid=None, fileid=None,
880 filelog=None, changectx=None):
880 filelog=None, changectx=None):
881 """changeid can be a changeset revision, node, or tag.
881 """changeid can be a changeset revision, node, or tag.
882 fileid can be a file revision or node."""
882 fileid can be a file revision or node."""
883 self._repo = repo
883 self._repo = repo
884 self._path = path
884 self._path = path
885
885
886 assert (changeid is not None
886 assert (changeid is not None
887 or fileid is not None
887 or fileid is not None
888 or changectx is not None), \
888 or changectx is not None), \
889 ("bad args: changeid=%r, fileid=%r, changectx=%r"
889 ("bad args: changeid=%r, fileid=%r, changectx=%r"
890 % (changeid, fileid, changectx))
890 % (changeid, fileid, changectx))
891
891
892 if filelog is not None:
892 if filelog is not None:
893 self._filelog = filelog
893 self._filelog = filelog
894
894
895 if changeid is not None:
895 if changeid is not None:
896 self._changeid = changeid
896 self._changeid = changeid
897 if changectx is not None:
897 if changectx is not None:
898 self._changectx = changectx
898 self._changectx = changectx
899 if fileid is not None:
899 if fileid is not None:
900 self._fileid = fileid
900 self._fileid = fileid
901
901
902 @propertycache
902 @propertycache
903 def _changectx(self):
903 def _changectx(self):
904 try:
904 try:
905 return changectx(self._repo, self._changeid)
905 return changectx(self._repo, self._changeid)
906 except error.FilteredRepoLookupError:
906 except error.FilteredRepoLookupError:
907 # Linkrev may point to any revision in the repository. When the
907 # Linkrev may point to any revision in the repository. When the
908 # repository is filtered this may lead to `filectx` trying to build
908 # repository is filtered this may lead to `filectx` trying to build
909 # `changectx` for filtered revision. In such case we fallback to
909 # `changectx` for filtered revision. In such case we fallback to
910 # creating `changectx` on the unfiltered version of the reposition.
910 # creating `changectx` on the unfiltered version of the reposition.
911 # This fallback should not be an issue because `changectx` from
911 # This fallback should not be an issue because `changectx` from
912 # `filectx` are not used in complex operations that care about
912 # `filectx` are not used in complex operations that care about
913 # filtering.
913 # filtering.
914 #
914 #
915 # This fallback is a cheap and dirty fix that prevent several
915 # This fallback is a cheap and dirty fix that prevent several
916 # crashes. It does not ensure the behavior is correct. However the
916 # crashes. It does not ensure the behavior is correct. However the
917 # behavior was not correct before filtering either and "incorrect
917 # behavior was not correct before filtering either and "incorrect
918 # behavior" is seen as better as "crash"
918 # behavior" is seen as better as "crash"
919 #
919 #
920 # Linkrevs have several serious troubles with filtering that are
920 # Linkrevs have several serious troubles with filtering that are
921 # complicated to solve. Proper handling of the issue here should be
921 # complicated to solve. Proper handling of the issue here should be
922 # considered when solving linkrev issue are on the table.
922 # considered when solving linkrev issue are on the table.
923 return changectx(self._repo.unfiltered(), self._changeid)
923 return changectx(self._repo.unfiltered(), self._changeid)
924
924
925 def filectx(self, fileid):
925 def filectx(self, fileid):
926 '''opens an arbitrary revision of the file without
926 '''opens an arbitrary revision of the file without
927 opening a new filelog'''
927 opening a new filelog'''
928 return filectx(self._repo, self._path, fileid=fileid,
928 return filectx(self._repo, self._path, fileid=fileid,
929 filelog=self._filelog)
929 filelog=self._filelog)
930
930
931 def data(self):
931 def data(self):
932 try:
932 try:
933 return self._filelog.read(self._filenode)
933 return self._filelog.read(self._filenode)
934 except error.CensoredNodeError:
934 except error.CensoredNodeError:
935 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
935 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
936 return ""
936 return ""
937 raise util.Abort(_("censored node: %s") % short(self._filenode),
937 raise util.Abort(_("censored node: %s") % short(self._filenode),
938 hint=_("set censor.policy to ignore errors"))
938 hint=_("set censor.policy to ignore errors"))
939
939
940 def size(self):
940 def size(self):
941 return self._filelog.size(self._filerev)
941 return self._filelog.size(self._filerev)
942
942
943 def renamed(self):
943 def renamed(self):
944 """check if file was actually renamed in this changeset revision
944 """check if file was actually renamed in this changeset revision
945
945
946 If rename logged in file revision, we report copy for changeset only
946 If rename logged in file revision, we report copy for changeset only
947 if file revisions linkrev points back to the changeset in question
947 if file revisions linkrev points back to the changeset in question
948 or both changeset parents contain different file revisions.
948 or both changeset parents contain different file revisions.
949 """
949 """
950
950
951 renamed = self._filelog.renamed(self._filenode)
951 renamed = self._filelog.renamed(self._filenode)
952 if not renamed:
952 if not renamed:
953 return renamed
953 return renamed
954
954
955 if self.rev() == self.linkrev():
955 if self.rev() == self.linkrev():
956 return renamed
956 return renamed
957
957
958 name = self.path()
958 name = self.path()
959 fnode = self._filenode
959 fnode = self._filenode
960 for p in self._changectx.parents():
960 for p in self._changectx.parents():
961 try:
961 try:
962 if fnode == p.filenode(name):
962 if fnode == p.filenode(name):
963 return None
963 return None
964 except error.LookupError:
964 except error.LookupError:
965 pass
965 pass
966 return renamed
966 return renamed
967
967
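A small caller-side sketch of renamed(); the fctx name below is a hypothetical filectx instance, not something defined in this module:

    # renamed() returns a (source path, source filenode) pair when the
    # filelog records a copy for this revision, and a false value otherwise.
    renamed = fctx.renamed()
    if renamed:
        source_path, source_node = renamed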
968 def children(self):
968 def children(self):
969 # hard for renames
969 # hard for renames
970 c = self._filelog.children(self._filenode)
970 c = self._filelog.children(self._filenode)
971 return [filectx(self._repo, self._path, fileid=x,
971 return [filectx(self._repo, self._path, fileid=x,
972 filelog=self._filelog) for x in c]
972 filelog=self._filelog) for x in c]
973
973
974 class committablectx(basectx):
974 class committablectx(basectx):
975 """A committablectx object provides common functionality for a context that
975 """A committablectx object provides common functionality for a context that
976 wants the ability to commit, e.g. workingctx or memctx."""
976 wants the ability to commit, e.g. workingctx or memctx."""
977 def __init__(self, repo, text="", user=None, date=None, extra=None,
977 def __init__(self, repo, text="", user=None, date=None, extra=None,
978 changes=None):
978 changes=None):
979 self._repo = repo
979 self._repo = repo
980 self._rev = None
980 self._rev = None
981 self._node = None
981 self._node = None
982 self._text = text
982 self._text = text
983 if date:
983 if date:
984 self._date = util.parsedate(date)
984 self._date = util.parsedate(date)
985 if user:
985 if user:
986 self._user = user
986 self._user = user
987 if changes:
987 if changes:
988 self._status = changes
988 self._status = changes
989
989
990 self._extra = {}
990 self._extra = {}
991 if extra:
991 if extra:
992 self._extra = extra.copy()
992 self._extra = extra.copy()
993 if 'branch' not in self._extra:
993 if 'branch' not in self._extra:
994 try:
994 try:
995 branch = encoding.fromlocal(self._repo.dirstate.branch())
995 branch = encoding.fromlocal(self._repo.dirstate.branch())
996 except UnicodeDecodeError:
996 except UnicodeDecodeError:
997 raise util.Abort(_('branch name not in UTF-8!'))
997 raise util.Abort(_('branch name not in UTF-8!'))
998 self._extra['branch'] = branch
998 self._extra['branch'] = branch
999 if self._extra['branch'] == '':
999 if self._extra['branch'] == '':
1000 self._extra['branch'] = 'default'
1000 self._extra['branch'] = 'default'
1001
1001
1002 def __str__(self):
1002 def __str__(self):
1003 return str(self._parents[0]) + "+"
1003 return str(self._parents[0]) + "+"
1004
1004
1005 def __nonzero__(self):
1005 def __nonzero__(self):
1006 return True
1006 return True
1007
1007
1008 def _buildflagfunc(self):
1008 def _buildflagfunc(self):
1009 # Create a fallback function for getting file flags when the
1009 # Create a fallback function for getting file flags when the
1010 # filesystem doesn't support them
1010 # filesystem doesn't support them
1011
1011
1012 copiesget = self._repo.dirstate.copies().get
1012 copiesget = self._repo.dirstate.copies().get
1013
1013
1014 if len(self._parents) < 2:
1014 if len(self._parents) < 2:
1015 # when we have one parent, it's easy: copy from parent
1015 # when we have one parent, it's easy: copy from parent
1016 man = self._parents[0].manifest()
1016 man = self._parents[0].manifest()
1017 def func(f):
1017 def func(f):
1018 f = copiesget(f, f)
1018 f = copiesget(f, f)
1019 return man.flags(f)
1019 return man.flags(f)
1020 else:
1020 else:
1021 # merges are tricky: we try to reconstruct the unstored
1021 # merges are tricky: we try to reconstruct the unstored
1022 # result from the merge (issue1802)
1022 # result from the merge (issue1802)
1023 p1, p2 = self._parents
1023 p1, p2 = self._parents
1024 pa = p1.ancestor(p2)
1024 pa = p1.ancestor(p2)
1025 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1025 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1026
1026
1027 def func(f):
1027 def func(f):
1028 f = copiesget(f, f) # may be wrong for merges with copies
1028 f = copiesget(f, f) # may be wrong for merges with copies
1029 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1029 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1030 if fl1 == fl2:
1030 if fl1 == fl2:
1031 return fl1
1031 return fl1
1032 if fl1 == fla:
1032 if fl1 == fla:
1033 return fl2
1033 return fl2
1034 if fl2 == fla:
1034 if fl2 == fla:
1035 return fl1
1035 return fl1
1036 return '' # punt for conflicts
1036 return '' # punt for conflicts
1037
1037
1038 return func
1038 return func
1039
1039
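The merge branch of _buildflagfunc resolves flags with a simple three-way rule; a standalone sketch of that rule (the function name is made up for illustration):

    def resolveflag(fl1, fl2, fla):
        # fl1/fl2 are the flags from the two parents, fla from their ancestor;
        # identical parents win, otherwise the side that changed wins.
        if fl1 == fl2:
            return fl1
        if fl1 == fla:
            return fl2
        if fl2 == fla:
            return fl1
        return ''  # conflicting flag changes: punt, as the code above does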
1040 @propertycache
1040 @propertycache
1041 def _flagfunc(self):
1041 def _flagfunc(self):
1042 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1042 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1043
1043
1044 @propertycache
1044 @propertycache
1045 def _manifest(self):
1045 def _manifest(self):
1046 """generate a manifest corresponding to the values in self._status
1046 """generate a manifest corresponding to the values in self._status
1047
1047
1048 This reuses the file nodeid from the parent, but appends an extra letter
1048 This reuses the file nodeid from the parent, but appends an extra letter
1049 when the file is modified. Modified files get an extra 'm' while added
1049 when the file is modified. Modified files get an extra 'm' while added
1050 files get an extra 'a'. This is used by the manifest merge to see that
1050 files get an extra 'a'. This is used by the manifest merge to see that
1051 files are different and by the update logic to avoid deleting newly added files.
1051 files are different and by the update logic to avoid deleting newly added files.
1052 """
1052 """
1053
1053
1054 man1 = self._parents[0].manifest()
1054 man1 = self._parents[0].manifest()
1055 man = man1.copy()
1055 man = man1.copy()
1056 if len(self._parents) > 1:
1056 if len(self._parents) > 1:
1057 man2 = self.p2().manifest()
1057 man2 = self.p2().manifest()
1058 def getman(f):
1058 def getman(f):
1059 if f in man1:
1059 if f in man1:
1060 return man1
1060 return man1
1061 return man2
1061 return man2
1062 else:
1062 else:
1063 getman = lambda f: man1
1063 getman = lambda f: man1
1064
1064
1065 copied = self._repo.dirstate.copies()
1065 copied = self._repo.dirstate.copies()
1066 ff = self._flagfunc
1066 ff = self._flagfunc
1067 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1067 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1068 for f in l:
1068 for f in l:
1069 orig = copied.get(f, f)
1069 orig = copied.get(f, f)
1070 man[f] = getman(orig).get(orig, nullid) + i
1070 man[f] = getman(orig).get(orig, nullid) + i
1071 try:
1071 try:
1072 man.setflag(f, ff(f))
1072 man.setflag(f, ff(f))
1073 except OSError:
1073 except OSError:
1074 pass
1074 pass
1075
1075
1076 for f in self._status.deleted + self._status.removed:
1076 for f in self._status.deleted + self._status.removed:
1077 if f in man:
1077 if f in man:
1078 del man[f]
1078 del man[f]
1079
1079
1080 return man
1080 return man
1081
1081
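To make the suffix trick above concrete, a small illustrative sketch (the node values are fake stand-ins, not real repository data):

    # nullid is the 20-byte null hash imported at the top of this module.
    parent_node = '\x12' * 20           # stand-in for a real 20-byte nodeid
    modified_entry = parent_node + 'm'  # entry recorded for a modified file
    added_entry = nullid + 'a'          # added file absent from both parents
    # Either suffixed value compares unequal to every real 20-byte node,
    # which is all the manifest merge and update logic need to know.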
1082 @propertycache
1082 @propertycache
1083 def _status(self):
1083 def _status(self):
1084 return self._repo.status()
1084 return self._repo.status()
1085
1085
1086 @propertycache
1086 @propertycache
1087 def _user(self):
1087 def _user(self):
1088 return self._repo.ui.username()
1088 return self._repo.ui.username()
1089
1089
1090 @propertycache
1090 @propertycache
1091 def _date(self):
1091 def _date(self):
1092 return util.makedate()
1092 return util.makedate()
1093
1093
1094 def subrev(self, subpath):
1094 def subrev(self, subpath):
1095 return None
1095 return None
1096
1096
1097 def user(self):
1097 def user(self):
1098 return self._user or self._repo.ui.username()
1098 return self._user or self._repo.ui.username()
1099 def date(self):
1099 def date(self):
1100 return self._date
1100 return self._date
1101 def description(self):
1101 def description(self):
1102 return self._text
1102 return self._text
1103 def files(self):
1103 def files(self):
1104 return sorted(self._status.modified + self._status.added +
1104 return sorted(self._status.modified + self._status.added +
1105 self._status.removed)
1105 self._status.removed)
1106
1106
1107 def modified(self):
1107 def modified(self):
1108 return self._status.modified
1108 return self._status.modified
1109 def added(self):
1109 def added(self):
1110 return self._status.added
1110 return self._status.added
1111 def removed(self):
1111 def removed(self):
1112 return self._status.removed
1112 return self._status.removed
1113 def deleted(self):
1113 def deleted(self):
1114 return self._status.deleted
1114 return self._status.deleted
1115 def unknown(self):
1115 def unknown(self):
1116 return self._status.unknown
1116 return self._status.unknown
1117 def ignored(self):
1117 def ignored(self):
1118 return self._status.ignored
1118 return self._status.ignored
1119 def clean(self):
1119 def clean(self):
1120 return self._status.clean
1120 return self._status.clean
1121 def branch(self):
1121 def branch(self):
1122 return encoding.tolocal(self._extra['branch'])
1122 return encoding.tolocal(self._extra['branch'])
1123 def closesbranch(self):
1123 def closesbranch(self):
1124 return 'close' in self._extra
1124 return 'close' in self._extra
1125 def extra(self):
1125 def extra(self):
1126 return self._extra
1126 return self._extra
1127
1127
1128 def tags(self):
1128 def tags(self):
1129 t = []
1129 t = []
1130 for p in self.parents():
1130 for p in self.parents():
1131 t.extend(p.tags())
1131 t.extend(p.tags())
1132 return t
1132 return t
1133
1133
1134 def bookmarks(self):
1134 def bookmarks(self):
1135 b = []
1135 b = []
1136 for p in self.parents():
1136 for p in self.parents():
1137 b.extend(p.bookmarks())
1137 b.extend(p.bookmarks())
1138 return b
1138 return b
1139
1139
1140 def phase(self):
1140 def phase(self):
1141 phase = phases.draft # default phase to draft
1141 phase = phases.draft # default phase to draft
1142 for p in self.parents():
1142 for p in self.parents():
1143 phase = max(phase, p.phase())
1143 phase = max(phase, p.phase())
1144 return phase
1144 return phase
1145
1145
1146 def hidden(self):
1146 def hidden(self):
1147 return False
1147 return False
1148
1148
1149 def children(self):
1149 def children(self):
1150 return []
1150 return []
1151
1151
1152 def flags(self, path):
1152 def flags(self, path):
1153 if '_manifest' in self.__dict__:
1153 if '_manifest' in self.__dict__:
1154 try:
1154 try:
1155 return self._manifest.flags(path)
1155 return self._manifest.flags(path)
1156 except KeyError:
1156 except KeyError:
1157 return ''
1157 return ''
1158
1158
1159 try:
1159 try:
1160 return self._flagfunc(path)
1160 return self._flagfunc(path)
1161 except OSError:
1161 except OSError:
1162 return ''
1162 return ''
1163
1163
1164 def ancestor(self, c2):
1164 def ancestor(self, c2):
1165 """return the "best" ancestor context of self and c2"""
1165 """return the "best" ancestor context of self and c2"""
1166 return self._parents[0].ancestor(c2) # punt on two parents for now
1166 return self._parents[0].ancestor(c2) # punt on two parents for now
1167
1167
1168 def walk(self, match):
1168 def walk(self, match):
1169 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1169 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1170 True, False))
1170 True, False))
1171
1171
1172 def matches(self, match):
1172 def matches(self, match):
1173 return sorted(self._repo.dirstate.matches(match))
1173 return sorted(self._repo.dirstate.matches(match))
1174
1174
1175 def ancestors(self):
1175 def ancestors(self):
1176 for p in self._parents:
1176 for p in self._parents:
1177 yield p
1177 yield p
1178 for a in self._repo.changelog.ancestors(
1178 for a in self._repo.changelog.ancestors(
1179 [p.rev() for p in self._parents]):
1179 [p.rev() for p in self._parents]):
1180 yield changectx(self._repo, a)
1180 yield changectx(self._repo, a)
1181
1181
1182 def markcommitted(self, node):
1182 def markcommitted(self, node):
1183 """Perform post-commit cleanup necessary after committing this ctx
1183 """Perform post-commit cleanup necessary after committing this ctx
1184
1184
1185 Specifically, this updates the backing stores that this working context
1185 Specifically, this updates the backing stores that this working context
1186 wraps so that they reflect the fact that the changes represented by this
1186 wraps so that they reflect the fact that the changes represented by this
1187 workingctx have been committed. For example, it marks
1187 workingctx have been committed. For example, it marks
1188 modified and added files as normal in the dirstate.
1188 modified and added files as normal in the dirstate.
1189
1189
1190 """
1190 """
1191
1191
1192 self._repo.dirstate.beginparentchange()
1192 self._repo.dirstate.beginparentchange()
1193 for f in self.modified() + self.added():
1193 for f in self.modified() + self.added():
1194 self._repo.dirstate.normal(f)
1194 self._repo.dirstate.normal(f)
1195 for f in self.removed():
1195 for f in self.removed():
1196 self._repo.dirstate.drop(f)
1196 self._repo.dirstate.drop(f)
1197 self._repo.dirstate.setparents(node)
1197 self._repo.dirstate.setparents(node)
1198 self._repo.dirstate.endparentchange()
1198 self._repo.dirstate.endparentchange()
1199
1199
1200 def dirs(self):
1200 def dirs(self):
1201 return self._repo.dirstate.dirs()
1201 return self._repo.dirstate.dirs()
1202
1202
1203 class workingctx(committablectx):
1203 class workingctx(committablectx):
1204 """A workingctx object makes access to data related to
1204 """A workingctx object makes access to data related to
1205 the current working directory convenient.
1205 the current working directory convenient.
1206 date - any valid date string or (unixtime, offset), or None.
1206 date - any valid date string or (unixtime, offset), or None.
1207 user - username string, or None.
1207 user - username string, or None.
1208 extra - a dictionary of extra values, or None.
1208 extra - a dictionary of extra values, or None.
1209 changes - a list of file lists as returned by localrepo.status()
1209 changes - a list of file lists as returned by localrepo.status()
1210 or None to use the repository status.
1210 or None to use the repository status.
1211 """
1211 """
1212 def __init__(self, repo, text="", user=None, date=None, extra=None,
1212 def __init__(self, repo, text="", user=None, date=None, extra=None,
1213 changes=None):
1213 changes=None):
1214 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1214 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1215
1215
1216 def __iter__(self):
1216 def __iter__(self):
1217 d = self._repo.dirstate
1217 d = self._repo.dirstate
1218 for f in d:
1218 for f in d:
1219 if d[f] != 'r':
1219 if d[f] != 'r':
1220 yield f
1220 yield f
1221
1221
1222 def __contains__(self, key):
1222 def __contains__(self, key):
1223 return self._repo.dirstate[key] not in "?r"
1223 return self._repo.dirstate[key] not in "?r"
1224
1224
1225 @propertycache
1225 @propertycache
1226 def _parents(self):
1226 def _parents(self):
1227 p = self._repo.dirstate.parents()
1227 p = self._repo.dirstate.parents()
1228 if p[1] == nullid:
1228 if p[1] == nullid:
1229 p = p[:-1]
1229 p = p[:-1]
1230 return [changectx(self._repo, x) for x in p]
1230 return [changectx(self._repo, x) for x in p]
1231
1231
1232 def filectx(self, path, filelog=None):
1232 def filectx(self, path, filelog=None):
1233 """get a file context from the working directory"""
1233 """get a file context from the working directory"""
1234 return workingfilectx(self._repo, path, workingctx=self,
1234 return workingfilectx(self._repo, path, workingctx=self,
1235 filelog=filelog)
1235 filelog=filelog)
1236
1236
1237 def dirty(self, missing=False, merge=True, branch=True):
1237 def dirty(self, missing=False, merge=True, branch=True):
1238 "check whether a working directory is modified"
1238 "check whether a working directory is modified"
1239 # check subrepos first
1239 # check subrepos first
1240 for s in sorted(self.substate):
1240 for s in sorted(self.substate):
1241 if self.sub(s).dirty():
1241 if self.sub(s).dirty():
1242 return True
1242 return True
1243 # check current working dir
1243 # check current working dir
1244 return ((merge and self.p2()) or
1244 return ((merge and self.p2()) or
1245 (branch and self.branch() != self.p1().branch()) or
1245 (branch and self.branch() != self.p1().branch()) or
1246 self.modified() or self.added() or self.removed() or
1246 self.modified() or self.added() or self.removed() or
1247 (missing and self.deleted()))
1247 (missing and self.deleted()))
1248
1248
1249 def add(self, list, prefix=""):
1249 def add(self, list, prefix=""):
1250 join = lambda f: os.path.join(prefix, f)
1250 join = lambda f: os.path.join(prefix, f)
1251 wlock = self._repo.wlock()
1251 wlock = self._repo.wlock()
1252 ui, ds = self._repo.ui, self._repo.dirstate
1252 ui, ds = self._repo.ui, self._repo.dirstate
1253 try:
1253 try:
1254 rejected = []
1254 rejected = []
1255 lstat = self._repo.wvfs.lstat
1255 lstat = self._repo.wvfs.lstat
1256 for f in list:
1256 for f in list:
1257 scmutil.checkportable(ui, join(f))
1257 scmutil.checkportable(ui, join(f))
1258 try:
1258 try:
1259 st = lstat(f)
1259 st = lstat(f)
1260 except OSError:
1260 except OSError:
1261 ui.warn(_("%s does not exist!\n") % join(f))
1261 ui.warn(_("%s does not exist!\n") % join(f))
1262 rejected.append(f)
1262 rejected.append(f)
1263 continue
1263 continue
1264 if st.st_size > 10000000:
1264 if st.st_size > 10000000:
1265 ui.warn(_("%s: up to %d MB of RAM may be required "
1265 ui.warn(_("%s: up to %d MB of RAM may be required "
1266 "to manage this file\n"
1266 "to manage this file\n"
1267 "(use 'hg revert %s' to cancel the "
1267 "(use 'hg revert %s' to cancel the "
1268 "pending addition)\n")
1268 "pending addition)\n")
1269 % (f, 3 * st.st_size // 1000000, join(f)))
1269 % (f, 3 * st.st_size // 1000000, join(f)))
1270 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1270 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1271 ui.warn(_("%s not added: only files and symlinks "
1271 ui.warn(_("%s not added: only files and symlinks "
1272 "supported currently\n") % join(f))
1272 "supported currently\n") % join(f))
1273 rejected.append(f)
1273 rejected.append(f)
1274 elif ds[f] in 'amn':
1274 elif ds[f] in 'amn':
1275 ui.warn(_("%s already tracked!\n") % join(f))
1275 ui.warn(_("%s already tracked!\n") % join(f))
1276 elif ds[f] == 'r':
1276 elif ds[f] == 'r':
1277 ds.normallookup(f)
1277 ds.normallookup(f)
1278 else:
1278 else:
1279 ds.add(f)
1279 ds.add(f)
1280 return rejected
1280 return rejected
1281 finally:
1281 finally:
1282 wlock.release()
1282 wlock.release()
1283
1283
1284 def forget(self, files, prefix=""):
1284 def forget(self, files, prefix=""):
1285 join = lambda f: os.path.join(prefix, f)
1285 join = lambda f: os.path.join(prefix, f)
1286 wlock = self._repo.wlock()
1286 wlock = self._repo.wlock()
1287 try:
1287 try:
1288 rejected = []
1288 rejected = []
1289 for f in files:
1289 for f in files:
1290 if f not in self._repo.dirstate:
1290 if f not in self._repo.dirstate:
1291 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1291 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1292 rejected.append(f)
1292 rejected.append(f)
1293 elif self._repo.dirstate[f] != 'a':
1293 elif self._repo.dirstate[f] != 'a':
1294 self._repo.dirstate.remove(f)
1294 self._repo.dirstate.remove(f)
1295 else:
1295 else:
1296 self._repo.dirstate.drop(f)
1296 self._repo.dirstate.drop(f)
1297 return rejected
1297 return rejected
1298 finally:
1298 finally:
1299 wlock.release()
1299 wlock.release()
1300
1300
1301 def undelete(self, list):
1301 def undelete(self, list):
1302 pctxs = self.parents()
1302 pctxs = self.parents()
1303 wlock = self._repo.wlock()
1303 wlock = self._repo.wlock()
1304 try:
1304 try:
1305 for f in list:
1305 for f in list:
1306 if self._repo.dirstate[f] != 'r':
1306 if self._repo.dirstate[f] != 'r':
1307 self._repo.ui.warn(_("%s not removed!\n") % f)
1307 self._repo.ui.warn(_("%s not removed!\n") % f)
1308 else:
1308 else:
1309 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1309 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1310 t = fctx.data()
1310 t = fctx.data()
1311 self._repo.wwrite(f, t, fctx.flags())
1311 self._repo.wwrite(f, t, fctx.flags())
1312 self._repo.dirstate.normal(f)
1312 self._repo.dirstate.normal(f)
1313 finally:
1313 finally:
1314 wlock.release()
1314 wlock.release()
1315
1315
1316 def copy(self, source, dest):
1316 def copy(self, source, dest):
1317 try:
1317 try:
1318 st = self._repo.wvfs.lstat(dest)
1318 st = self._repo.wvfs.lstat(dest)
1319 except OSError, err:
1319 except OSError, err:
1320 if err.errno != errno.ENOENT:
1320 if err.errno != errno.ENOENT:
1321 raise
1321 raise
1322 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1322 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1323 return
1323 return
1324 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1324 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1325 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1325 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1326 "symbolic link\n") % dest)
1326 "symbolic link\n") % dest)
1327 else:
1327 else:
1328 wlock = self._repo.wlock()
1328 wlock = self._repo.wlock()
1329 try:
1329 try:
1330 if self._repo.dirstate[dest] in '?':
1330 if self._repo.dirstate[dest] in '?':
1331 self._repo.dirstate.add(dest)
1331 self._repo.dirstate.add(dest)
1332 elif self._repo.dirstate[dest] in 'r':
1332 elif self._repo.dirstate[dest] in 'r':
1333 self._repo.dirstate.normallookup(dest)
1333 self._repo.dirstate.normallookup(dest)
1334 self._repo.dirstate.copy(source, dest)
1334 self._repo.dirstate.copy(source, dest)
1335 finally:
1335 finally:
1336 wlock.release()
1336 wlock.release()
1337
1337
1338 def _filtersuspectsymlink(self, files):
1338 def _filtersuspectsymlink(self, files):
1339 if not files or self._repo.dirstate._checklink:
1339 if not files or self._repo.dirstate._checklink:
1340 return files
1340 return files
1341
1341
1342 # Symlink placeholders may get non-symlink-like contents
1342 # Symlink placeholders may get non-symlink-like contents
1343 # via user error or dereferencing by NFS or Samba servers,
1343 # via user error or dereferencing by NFS or Samba servers,
1344 # so we filter out any placeholders that don't look like a
1344 # so we filter out any placeholders that don't look like a
1345 # symlink
1345 # symlink
1346 sane = []
1346 sane = []
1347 for f in files:
1347 for f in files:
1348 if self.flags(f) == 'l':
1348 if self.flags(f) == 'l':
1349 d = self[f].data()
1349 d = self[f].data()
1350 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1350 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1351 self._repo.ui.debug('ignoring suspect symlink placeholder'
1351 self._repo.ui.debug('ignoring suspect symlink placeholder'
1352 ' "%s"\n' % f)
1352 ' "%s"\n' % f)
1353 continue
1353 continue
1354 sane.append(f)
1354 sane.append(f)
1355 return sane
1355 return sane
1356
1356
1357 def _checklookup(self, files):
1357 def _checklookup(self, files):
1358 # check for any possibly clean files
1358 # check for any possibly clean files
1359 if not files:
1359 if not files:
1360 return [], []
1360 return [], []
1361
1361
1362 modified = []
1362 modified = []
1363 fixup = []
1363 fixup = []
1364 pctx = self._parents[0]
1364 pctx = self._parents[0]
1365 # do a full compare of any files that might have changed
1365 # do a full compare of any files that might have changed
1366 for f in sorted(files):
1366 for f in sorted(files):
1367 if (f not in pctx or self.flags(f) != pctx.flags(f)
1367 if (f not in pctx or self.flags(f) != pctx.flags(f)
1368 or pctx[f].cmp(self[f])):
1368 or pctx[f].cmp(self[f])):
1369 modified.append(f)
1369 modified.append(f)
1370 else:
1370 else:
1371 fixup.append(f)
1371 fixup.append(f)
1372
1372
1373 # update dirstate for files that are actually clean
1373 # update dirstate for files that are actually clean
1374 if fixup:
1374 if fixup:
1375 try:
1375 try:
1376 # updating the dirstate is optional
1376 # updating the dirstate is optional
1377 # so we don't wait on the lock
1377 # so we don't wait on the lock
1378 # wlock can invalidate the dirstate, so cache normal _after_
1378 # wlock can invalidate the dirstate, so cache normal _after_
1379 # taking the lock
1379 # taking the lock
1380 wlock = self._repo.wlock(False)
1380 wlock = self._repo.wlock(False)
1381 normal = self._repo.dirstate.normal
1381 normal = self._repo.dirstate.normal
1382 try:
1382 try:
1383 for f in fixup:
1383 for f in fixup:
1384 normal(f)
1384 normal(f)
1385 finally:
1385 finally:
1386 wlock.release()
1386 wlock.release()
1387 except error.LockError:
1387 except error.LockError:
1388 pass
1388 pass
1389 return modified, fixup
1389 return modified, fixup
1390
1390
1391 def _manifestmatches(self, match, s):
1391 def _manifestmatches(self, match, s):
1392 """Slow path for workingctx
1392 """Slow path for workingctx
1393
1393
1394 The fast path is used when we compare the working directory to its parent;
1394 The fast path is used when we compare the working directory to its parent;
1395 reaching this function means we are comparing with a non-parent, therefore
1395 reaching this function means we are comparing with a non-parent, therefore
1396 we need to build a manifest and return what matches.
1396 we need to build a manifest and return what matches.
1397 """
1397 """
1398 mf = self._repo['.']._manifestmatches(match, s)
1398 mf = self._repo['.']._manifestmatches(match, s)
1399 for f in s.modified + s.added:
1399 for f in s.modified + s.added:
1400 mf[f] = _newnode
1400 mf[f] = _newnode
1401 mf.setflag(f, self.flags(f))
1401 mf.setflag(f, self.flags(f))
1402 for f in s.removed:
1402 for f in s.removed:
1403 if f in mf:
1403 if f in mf:
1404 del mf[f]
1404 del mf[f]
1405 return mf
1405 return mf
1406
1406
1407 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1407 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1408 unknown=False):
1408 unknown=False):
1409 '''Gets the status from the dirstate -- internal use only.'''
1409 '''Gets the status from the dirstate -- internal use only.'''
1410 listignored, listclean, listunknown = ignored, clean, unknown
1410 listignored, listclean, listunknown = ignored, clean, unknown
1411 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1411 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1412 subrepos = []
1412 subrepos = []
1413 if '.hgsub' in self:
1413 if '.hgsub' in self:
1414 subrepos = sorted(self.substate)
1414 subrepos = sorted(self.substate)
1415 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1415 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1416 listclean, listunknown)
1416 listclean, listunknown)
1417
1417
1418 # check for any possibly clean files
1418 # check for any possibly clean files
1419 if cmp:
1419 if cmp:
1420 modified2, fixup = self._checklookup(cmp)
1420 modified2, fixup = self._checklookup(cmp)
1421 s.modified.extend(modified2)
1421 s.modified.extend(modified2)
1422
1422
1423 # update dirstate for files that are actually clean
1423 # update dirstate for files that are actually clean
1424 if fixup and listclean:
1424 if fixup and listclean:
1425 s.clean.extend(fixup)
1425 s.clean.extend(fixup)
1426
1426
1427 return s
1427 return s
1428
1428
1429 def _buildstatus(self, other, s, match, listignored, listclean,
1429 def _buildstatus(self, other, s, match, listignored, listclean,
1430 listunknown):
1430 listunknown):
1431 """build a status with respect to another context
1431 """build a status with respect to another context
1432
1432
1433 This includes logic for maintaining the fast path of status when
1433 This includes logic for maintaining the fast path of status when
1434 comparing the working directory against its parent: building a new
1434 comparing the working directory against its parent: building a new
1435 manifest is skipped unless self (the working directory) is being
1435 manifest is skipped unless self (the working directory) is being
1436 compared against something other than its parent (repo['.']).
1436 compared against something other than its parent (repo['.']).
1437 """
1437 """
1438 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1438 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1439 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1439 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1440 # might have accidentally ended up with the entire contents of the file
1440 # might have accidentally ended up with the entire contents of the file
1441 # they are supposed to be linking to.
1441 # they are supposed to be linking to.
1442 s.modified[:] = self._filtersuspectsymlink(s.modified)
1442 s.modified[:] = self._filtersuspectsymlink(s.modified)
1443 if other != self._repo['.']:
1443 if other != self._repo['.']:
1444 s = super(workingctx, self)._buildstatus(other, s, match,
1444 s = super(workingctx, self)._buildstatus(other, s, match,
1445 listignored, listclean,
1445 listignored, listclean,
1446 listunknown)
1446 listunknown)
1447 self._status = s
1447 self._status = s
1448 return s
1448 return s
1449
1449
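A hedged usage sketch of the fast and slow paths described above; repo is an assumed, already-opened localrepo:

    st = repo.status()          # working directory vs. its parent: fast path
    st0 = repo.status(0, None)  # working directory vs. revision 0: slow path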
1450 def _matchstatus(self, other, match):
1450 def _matchstatus(self, other, match):
1451 """override the match method with a filter for directory patterns
1451 """override the match method with a filter for directory patterns
1452
1452
1453 We use inheritance to customize the match.bad method only in the case of
1453 We use inheritance to customize the match.bad method only in the case of
1454 workingctx, since it belongs only to the working directory when
1454 workingctx, since it belongs only to the working directory when
1455 comparing against the parent changeset.
1455 comparing against the parent changeset.
1456
1456
1457 If we aren't comparing against the working directory's parent, then we
1457 If we aren't comparing against the working directory's parent, then we
1458 just use the default match object sent to us.
1458 just use the default match object sent to us.
1459 """
1459 """
1460 superself = super(workingctx, self)
1460 superself = super(workingctx, self)
1461 match = superself._matchstatus(other, match)
1461 match = superself._matchstatus(other, match)
1462 if other != self._repo['.']:
1462 if other != self._repo['.']:
1463 def bad(f, msg):
1463 def bad(f, msg):
1464 # 'f' may be a directory pattern from 'match.files()',
1464 # 'f' may be a directory pattern from 'match.files()',
1465 # so 'f not in ctx1' is not enough
1465 # so 'f not in ctx1' is not enough
1466 if f not in other and f not in other.dirs():
1466 if f not in other and f not in other.dirs():
1467 self._repo.ui.warn('%s: %s\n' %
1467 self._repo.ui.warn('%s: %s\n' %
1468 (self._repo.dirstate.pathto(f), msg))
1468 (self._repo.dirstate.pathto(f), msg))
1469 match.bad = bad
1469 match.bad = bad
1470 return match
1470 return match
1471
1471
1472 class committablefilectx(basefilectx):
1472 class committablefilectx(basefilectx):
1473 """A committablefilectx provides common functionality for a file context
1473 """A committablefilectx provides common functionality for a file context
1474 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1474 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1475 def __init__(self, repo, path, filelog=None, ctx=None):
1475 def __init__(self, repo, path, filelog=None, ctx=None):
1476 self._repo = repo
1476 self._repo = repo
1477 self._path = path
1477 self._path = path
1478 self._changeid = None
1478 self._changeid = None
1479 self._filerev = self._filenode = None
1479 self._filerev = self._filenode = None
1480
1480
1481 if filelog is not None:
1481 if filelog is not None:
1482 self._filelog = filelog
1482 self._filelog = filelog
1483 if ctx:
1483 if ctx:
1484 self._changectx = ctx
1484 self._changectx = ctx
1485
1485
1486 def __nonzero__(self):
1486 def __nonzero__(self):
1487 return True
1487 return True
1488
1488
1489 def parents(self):
1489 def parents(self):
1490 '''return parent filectxs, following copies if necessary'''
1490 '''return parent filectxs, following copies if necessary'''
1491 def filenode(ctx, path):
1491 def filenode(ctx, path):
1492 return ctx._manifest.get(path, nullid)
1492 return ctx._manifest.get(path, nullid)
1493
1493
1494 path = self._path
1494 path = self._path
1495 fl = self._filelog
1495 fl = self._filelog
1496 pcl = self._changectx._parents
1496 pcl = self._changectx._parents
1497 renamed = self.renamed()
1497 renamed = self.renamed()
1498
1498
1499 if renamed:
1499 if renamed:
1500 pl = [renamed + (None,)]
1500 pl = [renamed + (None,)]
1501 else:
1501 else:
1502 pl = [(path, filenode(pcl[0], path), fl)]
1502 pl = [(path, filenode(pcl[0], path), fl)]
1503
1503
1504 for pc in pcl[1:]:
1504 for pc in pcl[1:]:
1505 pl.append((path, filenode(pc, path), fl))
1505 pl.append((path, filenode(pc, path), fl))
1506
1506
1507 return [filectx(self._repo, p, fileid=n, filelog=l)
1507 return [filectx(self._repo, p, fileid=n, filelog=l)
1508 for p, n, l in pl if n != nullid]
1508 for p, n, l in pl if n != nullid]
1509
1509
1510 def children(self):
1510 def children(self):
1511 return []
1511 return []
1512
1512
1513 class workingfilectx(committablefilectx):
1513 class workingfilectx(committablefilectx):
1514 """A workingfilectx object makes access to data related to a particular
1514 """A workingfilectx object makes access to data related to a particular
1515 file in the working directory convenient."""
1515 file in the working directory convenient."""
1516 def __init__(self, repo, path, filelog=None, workingctx=None):
1516 def __init__(self, repo, path, filelog=None, workingctx=None):
1517 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1517 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1518
1518
1519 @propertycache
1519 @propertycache
1520 def _changectx(self):
1520 def _changectx(self):
1521 return workingctx(self._repo)
1521 return workingctx(self._repo)
1522
1522
1523 def data(self):
1523 def data(self):
1524 return self._repo.wread(self._path)
1524 return self._repo.wread(self._path)
1525 def renamed(self):
1525 def renamed(self):
1526 rp = self._repo.dirstate.copied(self._path)
1526 rp = self._repo.dirstate.copied(self._path)
1527 if not rp:
1527 if not rp:
1528 return None
1528 return None
1529 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1529 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1530
1530
1531 def size(self):
1531 def size(self):
1532 return self._repo.wvfs.lstat(self._path).st_size
1532 return self._repo.wvfs.lstat(self._path).st_size
1533 def date(self):
1533 def date(self):
1534 t, tz = self._changectx.date()
1534 t, tz = self._changectx.date()
1535 try:
1535 try:
1536 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1536 return (int(self._repo.wvfs.lstat(self._path).st_mtime), tz)
1537 except OSError, err:
1537 except OSError, err:
1538 if err.errno != errno.ENOENT:
1538 if err.errno != errno.ENOENT:
1539 raise
1539 raise
1540 return (t, tz)
1540 return (t, tz)
1541
1541
1542 def cmp(self, fctx):
1542 def cmp(self, fctx):
1543 """compare with other file context
1543 """compare with other file context
1544
1544
1545 returns True if different than fctx.
1545 returns True if different than fctx.
1546 """
1546 """
1547 # fctx should be a filectx (not a workingfilectx)
1547 # fctx should be a filectx (not a workingfilectx)
1548 # invert comparison to reuse the same code path
1548 # invert comparison to reuse the same code path
1549 return fctx.cmp(self)
1549 return fctx.cmp(self)
1550
1550
1551 def remove(self, ignoremissing=False):
1551 def remove(self, ignoremissing=False):
1552 """wraps unlink for a repo's working directory"""
1552 """wraps unlink for a repo's working directory"""
1553 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1553 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1554
1554
1555 def write(self, data, flags):
1555 def write(self, data, flags):
1556 """wraps repo.wwrite"""
1556 """wraps repo.wwrite"""
1557 self._repo.wwrite(self._path, data, flags)
1557 self._repo.wwrite(self._path, data, flags)
1558
1558
1559 class memctx(committablectx):
1559 class memctx(committablectx):
1560 """Use memctx to perform in-memory commits via localrepo.commitctx().
1560 """Use memctx to perform in-memory commits via localrepo.commitctx().
1561
1561
1562 Revision information is supplied at initialization time, while the
1562 Revision information is supplied at initialization time, while the
1563 data of the related files is made available through a callback
1563 data of the related files is made available through a callback
1564 mechanism. 'repo' is the current localrepo, 'parents' is a
1564 mechanism. 'repo' is the current localrepo, 'parents' is a
1565 sequence of two parent revision identifiers (pass None for every
1565 sequence of two parent revision identifiers (pass None for every
1566 missing parent), 'text' is the commit message and 'files' lists
1566 missing parent), 'text' is the commit message and 'files' lists
1567 names of files touched by the revision (normalized and relative to
1567 names of files touched by the revision (normalized and relative to
1568 repository root).
1568 repository root).
1569
1569
1570 filectxfn(repo, memctx, path) is a callable receiving the
1570 filectxfn(repo, memctx, path) is a callable receiving the
1571 repository, the current memctx object and the normalized path of
1571 repository, the current memctx object and the normalized path of
1572 the requested file, relative to the repository root. It is fired by the
1572 the requested file, relative to the repository root. It is fired by the
1573 commit function for every file in 'files', but the call order is
1573 commit function for every file in 'files', but the call order is
1574 undefined. If the file is available in the revision being
1574 undefined. If the file is available in the revision being
1575 committed (updated or added), filectxfn returns a memfilectx
1575 committed (updated or added), filectxfn returns a memfilectx
1576 object. If the file was removed, filectxfn raises an
1576 object. If the file was removed, filectxfn raises an
1577 IOError. Moved files are represented by marking the source file
1577 IOError. Moved files are represented by marking the source file
1578 removed and the new file added with copy information (see
1578 removed and the new file added with copy information (see
1579 memfilectx).
1579 memfilectx).
1580
1580
1581 'user' receives the committer name and defaults to the current
1581 'user' receives the committer name and defaults to the current
1582 repository username, 'date' is the commit date in any format
1582 repository username, 'date' is the commit date in any format
1583 supported by util.parsedate() and defaults to the current date, and
1583 supported by util.parsedate() and defaults to the current date, and
1584 'extra' is a dictionary of metadata or is left empty.
1584 'extra' is a dictionary of metadata or is left empty.
1585 """
1585 """
1586
1586
1587 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1587 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1588 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1588 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1589 # this field to determine what to do in filectxfn.
1589 # this field to determine what to do in filectxfn.
1590 _returnnoneformissingfiles = True
1590 _returnnoneformissingfiles = True
1591
1591
1592 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1592 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1593 date=None, extra=None, editor=False):
1593 date=None, extra=None, editor=False):
1594 super(memctx, self).__init__(repo, text, user, date, extra)
1594 super(memctx, self).__init__(repo, text, user, date, extra)
1595 self._rev = None
1595 self._rev = None
1596 self._node = None
1596 self._node = None
1597 parents = [(p or nullid) for p in parents]
1597 parents = [(p or nullid) for p in parents]
1598 p1, p2 = parents
1598 p1, p2 = parents
1599 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1599 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1600 files = sorted(set(files))
1600 files = sorted(set(files))
1601 self._files = files
1601 self._files = files
1602 self.substate = {}
1602 self.substate = {}
1603
1603
1604 # if store is not callable, wrap it in a function
1604 # if store is not callable, wrap it in a function
1605 if not callable(filectxfn):
1605 if not callable(filectxfn):
1606 def getfilectx(repo, memctx, path):
1606 def getfilectx(repo, memctx, path):
1607 fctx = filectxfn[path]
1607 fctx = filectxfn[path]
1608 # this is weird but apparently we only keep track of one parent
1608 # this is weird but apparently we only keep track of one parent
1609 # (why not only store that instead of a tuple?)
1609 # (why not only store that instead of a tuple?)
1610 copied = fctx.renamed()
1610 copied = fctx.renamed()
1611 if copied:
1611 if copied:
1612 copied = copied[0]
1612 copied = copied[0]
1613 return memfilectx(repo, path, fctx.data(),
1613 return memfilectx(repo, path, fctx.data(),
1614 islink=fctx.islink(), isexec=fctx.isexec(),
1614 islink=fctx.islink(), isexec=fctx.isexec(),
1615 copied=copied, memctx=memctx)
1615 copied=copied, memctx=memctx)
1616 self._filectxfn = getfilectx
1616 self._filectxfn = getfilectx
1617 else:
1617 else:
1618 # "util.cachefunc" reduces invocation of possibly expensive
1618 # "util.cachefunc" reduces invocation of possibly expensive
1619 # "filectxfn" for performance (e.g. converting from another VCS)
1619 # "filectxfn" for performance (e.g. converting from another VCS)
1620 self._filectxfn = util.cachefunc(filectxfn)
1620 self._filectxfn = util.cachefunc(filectxfn)
1621
1621
1622 self._extra = extra and extra.copy() or {}
1622 self._extra = extra and extra.copy() or {}
1623 if self._extra.get('branch', '') == '':
1623 if self._extra.get('branch', '') == '':
1624 self._extra['branch'] = 'default'
1624 self._extra['branch'] = 'default'
1625
1625
1626 if editor:
1626 if editor:
1627 self._text = editor(self._repo, self, [])
1627 self._text = editor(self._repo, self, [])
1628 self._repo.savecommitmessage(self._text)
1628 self._repo.savecommitmessage(self._text)
1629
1629
1630 def filectx(self, path, filelog=None):
1630 def filectx(self, path, filelog=None):
1631 """get a file context from the working directory
1631 """get a file context from the working directory
1632
1632
1633 Returns None if the file doesn't exist and should be removed."""
1633 Returns None if the file doesn't exist and should be removed."""
1634 return self._filectxfn(self._repo, self, path)
1634 return self._filectxfn(self._repo, self, path)
1635
1635
1636 def commit(self):
1636 def commit(self):
1637 """commit context to the repo"""
1637 """commit context to the repo"""
1638 return self._repo.commitctx(self)
1638 return self._repo.commitctx(self)
1639
1639
1640 @propertycache
1640 @propertycache
1641 def _manifest(self):
1641 def _manifest(self):
1642 """generate a manifest based on the return values of filectxfn"""
1642 """generate a manifest based on the return values of filectxfn"""
1643
1643
1644 # keep this simple for now; just worry about p1
1644 # keep this simple for now; just worry about p1
1645 pctx = self._parents[0]
1645 pctx = self._parents[0]
1646 man = pctx.manifest().copy()
1646 man = pctx.manifest().copy()
1647
1647
1648 for f in self._status.modified:
1648 for f in self._status.modified:
1649 p1node = nullid
1649 p1node = nullid
1650 p2node = nullid
1650 p2node = nullid
1651 p = pctx[f].parents() # if file isn't in pctx, check p2?
1651 p = pctx[f].parents() # if file isn't in pctx, check p2?
1652 if len(p) > 0:
1652 if len(p) > 0:
1653 p1node = p[0].node()
1653 p1node = p[0].node()
1654 if len(p) > 1:
1654 if len(p) > 1:
1655 p2node = p[1].node()
1655 p2node = p[1].node()
1656 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1656 man[f] = revlog.hash(self[f].data(), p1node, p2node)
1657
1657
1658 for f in self._status.added:
1658 for f in self._status.added:
1659 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1659 man[f] = revlog.hash(self[f].data(), nullid, nullid)
1660
1660
1661 for f in self._status.removed:
1661 for f in self._status.removed:
1662 if f in man:
1662 if f in man:
1663 del man[f]
1663 del man[f]
1664
1664
1665 return man
1665 return man
1666
1666
1667 @propertycache
1667 @propertycache
1668 def _status(self):
1668 def _status(self):
1669 """Calculate exact status from ``files`` specified at construction
1669 """Calculate exact status from ``files`` specified at construction
1670 """
1670 """
1671 man1 = self.p1().manifest()
1671 man1 = self.p1().manifest()
1672 p2 = self._parents[1]
1672 p2 = self._parents[1]
1673 # "1 < len(self._parents)" can't be used for checking
1673 # "1 < len(self._parents)" can't be used for checking
1674 # existence of the 2nd parent, because "memctx._parents" is
1674 # existence of the 2nd parent, because "memctx._parents" is
1675 # explicitly initialized by the list, of which length is 2.
1675 # explicitly initialized by the list, of which length is 2.
1676 if p2.node() != nullid:
1676 if p2.node() != nullid:
1677 man2 = p2.manifest()
1677 man2 = p2.manifest()
1678 managing = lambda f: f in man1 or f in man2
1678 managing = lambda f: f in man1 or f in man2
1679 else:
1679 else:
1680 managing = lambda f: f in man1
1680 managing = lambda f: f in man1
1681
1681
1682 modified, added, removed = [], [], []
1682 modified, added, removed = [], [], []
1683 for f in self._files:
1683 for f in self._files:
1684 if not managing(f):
1684 if not managing(f):
1685 added.append(f)
1685 added.append(f)
1686 elif self[f]:
1686 elif self[f]:
1687 modified.append(f)
1687 modified.append(f)
1688 else:
1688 else:
1689 removed.append(f)
1689 removed.append(f)
1690
1690
1691 return scmutil.status(modified, added, removed, [], [], [], [])
1691 return scmutil.status(modified, added, removed, [], [], [], [])
1692
1692
1693 class memfilectx(committablefilectx):
1693 class memfilectx(committablefilectx):
1694 """memfilectx represents an in-memory file to commit.
1694 """memfilectx represents an in-memory file to commit.
1695
1695
1696 See memctx and committablefilectx for more details.
1696 See memctx and committablefilectx for more details.
1697 """
1697 """
1698 def __init__(self, repo, path, data, islink=False,
1698 def __init__(self, repo, path, data, islink=False,
1699 isexec=False, copied=None, memctx=None):
1699 isexec=False, copied=None, memctx=None):
1700 """
1700 """
1701 path is the normalized file path relative to repository root.
1701 path is the normalized file path relative to repository root.
1702 data is the file content as a string.
1702 data is the file content as a string.
1703 islink is True if the file is a symbolic link.
1703 islink is True if the file is a symbolic link.
1704 isexec is True if the file is executable.
1704 isexec is True if the file is executable.
1705 copied is the source file path if current file was copied in the
1705 copied is the source file path if current file was copied in the
1706 revision being committed, or None."""
1706 revision being committed, or None."""
1707 super(memfilectx, self).__init__(repo, path, None, memctx)
1707 super(memfilectx, self).__init__(repo, path, None, memctx)
1708 self._data = data
1708 self._data = data
1709 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1709 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1710 self._copied = None
1710 self._copied = None
1711 if copied:
1711 if copied:
1712 self._copied = (copied, nullid)
1712 self._copied = (copied, nullid)
1713
1713
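A small, hedged sketch of the constructor above for a copied file (repo and the path names are made up for illustration):

    fctx = memfilectx(repo, 'new/name.txt', 'file body\n',
                      islink=False, isexec=False, copied='old/name.txt')
    # fctx.renamed() now reports ('old/name.txt', nullid): the copy source
    # path paired with nullid, since no real filelog node exists yet.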
1714 def data(self):
1714 def data(self):
1715 return self._data
1715 return self._data
1716 def size(self):
1716 def size(self):
1717 return len(self.data())
1717 return len(self.data())
1718 def flags(self):
1718 def flags(self):
1719 return self._flags
1719 return self._flags
1720 def renamed(self):
1720 def renamed(self):
1721 return self._copied
1721 return self._copied
1722
1722
1723 def remove(self, ignoremissing=False):
1723 def remove(self, ignoremissing=False):
1724 """wraps unlink for a repo's working directory"""
1724 """wraps unlink for a repo's working directory"""
1725 # need to figure out what to do here
1725 # need to figure out what to do here
1726 del self._changectx[self._path]
1726 del self._changectx[self._path]
1727
1727
1728 def write(self, data, flags):
1728 def write(self, data, flags):
1729 """wraps repo.wwrite"""
1729 """wraps repo.wwrite"""
1730 self._data = data
1730 self._data = data