context: optimize _parents()...
Gregory Szorc - r27063:37e1fdcb default
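The rewritten changectx._parents (see the hunk at old/new lines 513-518 below) gains its speed by unpacking the (p1, p2) pair returned by changelog.parentrevs() once, caching self._repo in a local, and building the parent list literally instead of slicing the tuple and running a list comprehension. The following is a minimal, self-contained sketch of that same pattern, not the Mercurial code itself: fake_parentrevs, make_ctx, NULLREV and the timing harness are hypothetical stand-ins for changelog.parentrevs(), changectx() and nullrev.

# Hypothetical micro-benchmark contrasting the old and new _parents() shapes.
# The helpers below are stand-ins, not Mercurial APIs.
import timeit

NULLREV = -1

def fake_parentrevs(rev):
    # pretend every revision has a single parent, the common case
    return (rev - 1, NULLREV)

def make_ctx(repo, rev):
    # stand-in for changectx(repo, rev); a tuple is enough for timing
    return (repo, rev)

def parents_old(repo, rev):
    # old shape: tuple slice plus list comprehension
    p = fake_parentrevs(rev)
    if p[1] == NULLREV:
        p = p[:-1]
    return [make_ctx(repo, x) for x in p]

def parents_new(repo, rev):
    # new shape: unpack once, build the list literally
    p1, p2 = fake_parentrevs(rev)
    if p2 == NULLREV:
        return [make_ctx(repo, p1)]
    return [make_ctx(repo, p1), make_ctx(repo, p2)]

if __name__ == '__main__':
    for fn in (parents_old, parents_new):
        t = timeit.timeit(lambda: fn('repo', 1000), number=1000000)
        print('%s: %.3fs' % (fn.__name__, t))

Parent lookups sit on hot paths (DAG walks, revset evaluation), so avoiding the slice, the comprehension frame and the repeated self._repo attribute lookups is worthwhile even though each individual saving is small.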
@@ -1,1944 +1,1945
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import re
8 import re
9
9
10 from node import nullid, nullrev, wdirid, short, hex, bin
10 from node import nullid, nullrev, wdirid, short, hex, bin
11 from i18n import _
11 from i18n import _
12 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
12 import mdiff, error, util, scmutil, subrepo, patch, encoding, phases
13 import match as matchmod
13 import match as matchmod
14 import os, errno, stat
14 import os, errno, stat
15 import obsolete as obsmod
15 import obsolete as obsmod
16 import repoview
16 import repoview
17 import fileset
17 import fileset
18 import revlog
18 import revlog
19
19
20 propertycache = util.propertycache
20 propertycache = util.propertycache
21
21
22 # Phony node value to stand-in for new files in some uses of
22 # Phony node value to stand-in for new files in some uses of
23 # manifests. Manifests support 21-byte hashes for nodes which are
23 # manifests. Manifests support 21-byte hashes for nodes which are
24 # dirty in the working copy.
24 # dirty in the working copy.
25 _newnode = '!' * 21
25 _newnode = '!' * 21
26
26
27 nonascii = re.compile(r'[^\x21-\x7f]').search
27 nonascii = re.compile(r'[^\x21-\x7f]').search
28
28
29 class basectx(object):
29 class basectx(object):
30 """A basectx object represents the common logic for its children:
30 """A basectx object represents the common logic for its children:
31 changectx: read-only context that is already present in the repo,
31 changectx: read-only context that is already present in the repo,
32 workingctx: a context that represents the working directory and can
32 workingctx: a context that represents the working directory and can
33 be committed,
33 be committed,
34 memctx: a context that represents changes in-memory and can also
34 memctx: a context that represents changes in-memory and can also
35 be committed."""
35 be committed."""
36 def __new__(cls, repo, changeid='', *args, **kwargs):
36 def __new__(cls, repo, changeid='', *args, **kwargs):
37 if isinstance(changeid, basectx):
37 if isinstance(changeid, basectx):
38 return changeid
38 return changeid
39
39
40 o = super(basectx, cls).__new__(cls)
40 o = super(basectx, cls).__new__(cls)
41
41
42 o._repo = repo
42 o._repo = repo
43 o._rev = nullrev
43 o._rev = nullrev
44 o._node = nullid
44 o._node = nullid
45
45
46 return o
46 return o
47
47
48 def __str__(self):
48 def __str__(self):
49 return short(self.node())
49 return short(self.node())
50
50
51 def __int__(self):
51 def __int__(self):
52 return self.rev()
52 return self.rev()
53
53
54 def __repr__(self):
54 def __repr__(self):
55 return "<%s %s>" % (type(self).__name__, str(self))
55 return "<%s %s>" % (type(self).__name__, str(self))
56
56
57 def __eq__(self, other):
57 def __eq__(self, other):
58 try:
58 try:
59 return type(self) == type(other) and self._rev == other._rev
59 return type(self) == type(other) and self._rev == other._rev
60 except AttributeError:
60 except AttributeError:
61 return False
61 return False
62
62
63 def __ne__(self, other):
63 def __ne__(self, other):
64 return not (self == other)
64 return not (self == other)
65
65
66 def __contains__(self, key):
66 def __contains__(self, key):
67 return key in self._manifest
67 return key in self._manifest
68
68
69 def __getitem__(self, key):
69 def __getitem__(self, key):
70 return self.filectx(key)
70 return self.filectx(key)
71
71
72 def __iter__(self):
72 def __iter__(self):
73 return iter(self._manifest)
73 return iter(self._manifest)
74
74
75 def _manifestmatches(self, match, s):
75 def _manifestmatches(self, match, s):
76 """generate a new manifest filtered by the match argument
76 """generate a new manifest filtered by the match argument
77
77
78 This method is for internal use only and mainly exists to provide an
78 This method is for internal use only and mainly exists to provide an
79 object oriented way for other contexts to customize the manifest
79 object oriented way for other contexts to customize the manifest
80 generation.
80 generation.
81 """
81 """
82 return self.manifest().matches(match)
82 return self.manifest().matches(match)
83
83
84 def _matchstatus(self, other, match):
84 def _matchstatus(self, other, match):
85 """return match.always if match is none
85 """return match.always if match is none
86
86
87 This internal method provides a way for child objects to override the
87 This internal method provides a way for child objects to override the
88 match operator.
88 match operator.
89 """
89 """
90 return match or matchmod.always(self._repo.root, self._repo.getcwd())
90 return match or matchmod.always(self._repo.root, self._repo.getcwd())
91
91
92 def _buildstatus(self, other, s, match, listignored, listclean,
92 def _buildstatus(self, other, s, match, listignored, listclean,
93 listunknown):
93 listunknown):
94 """build a status with respect to another context"""
94 """build a status with respect to another context"""
95 # Load earliest manifest first for caching reasons. More specifically,
95 # Load earliest manifest first for caching reasons. More specifically,
96 # if you have revisions 1000 and 1001, 1001 is probably stored as a
96 # if you have revisions 1000 and 1001, 1001 is probably stored as a
97 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
97 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
98 # 1000 and cache it so that when you read 1001, we just need to apply a
98 # 1000 and cache it so that when you read 1001, we just need to apply a
99 # delta to what's in the cache. So that's one full reconstruction + one
99 # delta to what's in the cache. So that's one full reconstruction + one
100 # delta application.
100 # delta application.
101 if self.rev() is not None and self.rev() < other.rev():
101 if self.rev() is not None and self.rev() < other.rev():
102 self.manifest()
102 self.manifest()
103 mf1 = other._manifestmatches(match, s)
103 mf1 = other._manifestmatches(match, s)
104 mf2 = self._manifestmatches(match, s)
104 mf2 = self._manifestmatches(match, s)
105
105
106 modified, added = [], []
106 modified, added = [], []
107 removed = []
107 removed = []
108 clean = []
108 clean = []
109 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
109 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
110 deletedset = set(deleted)
110 deletedset = set(deleted)
111 d = mf1.diff(mf2, clean=listclean)
111 d = mf1.diff(mf2, clean=listclean)
112 for fn, value in d.iteritems():
112 for fn, value in d.iteritems():
113 if fn in deletedset:
113 if fn in deletedset:
114 continue
114 continue
115 if value is None:
115 if value is None:
116 clean.append(fn)
116 clean.append(fn)
117 continue
117 continue
118 (node1, flag1), (node2, flag2) = value
118 (node1, flag1), (node2, flag2) = value
119 if node1 is None:
119 if node1 is None:
120 added.append(fn)
120 added.append(fn)
121 elif node2 is None:
121 elif node2 is None:
122 removed.append(fn)
122 removed.append(fn)
123 elif node2 != _newnode:
123 elif node2 != _newnode:
124 # The file was not a new file in mf2, so an entry
124 # The file was not a new file in mf2, so an entry
125 # from diff is really a difference.
125 # from diff is really a difference.
126 modified.append(fn)
126 modified.append(fn)
127 elif self[fn].cmp(other[fn]):
127 elif self[fn].cmp(other[fn]):
128 # node2 was newnode, but the working file doesn't
128 # node2 was newnode, but the working file doesn't
129 # match the one in mf1.
129 # match the one in mf1.
130 modified.append(fn)
130 modified.append(fn)
131 else:
131 else:
132 clean.append(fn)
132 clean.append(fn)
133
133
134 if removed:
134 if removed:
135 # need to filter files if they are already reported as removed
135 # need to filter files if they are already reported as removed
136 unknown = [fn for fn in unknown if fn not in mf1]
136 unknown = [fn for fn in unknown if fn not in mf1]
137 ignored = [fn for fn in ignored if fn not in mf1]
137 ignored = [fn for fn in ignored if fn not in mf1]
138 # if they're deleted, don't report them as removed
138 # if they're deleted, don't report them as removed
139 removed = [fn for fn in removed if fn not in deletedset]
139 removed = [fn for fn in removed if fn not in deletedset]
140
140
141 return scmutil.status(modified, added, removed, deleted, unknown,
141 return scmutil.status(modified, added, removed, deleted, unknown,
142 ignored, clean)
142 ignored, clean)
143
143
144 @propertycache
144 @propertycache
145 def substate(self):
145 def substate(self):
146 return subrepo.state(self, self._repo.ui)
146 return subrepo.state(self, self._repo.ui)
147
147
148 def subrev(self, subpath):
148 def subrev(self, subpath):
149 return self.substate[subpath][1]
149 return self.substate[subpath][1]
150
150
151 def rev(self):
151 def rev(self):
152 return self._rev
152 return self._rev
153 def node(self):
153 def node(self):
154 return self._node
154 return self._node
155 def hex(self):
155 def hex(self):
156 return hex(self.node())
156 return hex(self.node())
157 def manifest(self):
157 def manifest(self):
158 return self._manifest
158 return self._manifest
159 def repo(self):
159 def repo(self):
160 return self._repo
160 return self._repo
161 def phasestr(self):
161 def phasestr(self):
162 return phases.phasenames[self.phase()]
162 return phases.phasenames[self.phase()]
163 def mutable(self):
163 def mutable(self):
164 return self.phase() > phases.public
164 return self.phase() > phases.public
165
165
166 def getfileset(self, expr):
166 def getfileset(self, expr):
167 return fileset.getfileset(self, expr)
167 return fileset.getfileset(self, expr)
168
168
169 def obsolete(self):
169 def obsolete(self):
170 """True if the changeset is obsolete"""
170 """True if the changeset is obsolete"""
171 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
171 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
172
172
173 def extinct(self):
173 def extinct(self):
174 """True if the changeset is extinct"""
174 """True if the changeset is extinct"""
175 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
175 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
176
176
177 def unstable(self):
177 def unstable(self):
178 """True if the changeset is not obsolete but it's ancestor are"""
178 """True if the changeset is not obsolete but it's ancestor are"""
179 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
179 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
180
180
181 def bumped(self):
181 def bumped(self):
182 """True if the changeset try to be a successor of a public changeset
182 """True if the changeset try to be a successor of a public changeset
183
183
184 Only non-public and non-obsolete changesets may be bumped.
184 Only non-public and non-obsolete changesets may be bumped.
185 """
185 """
186 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
186 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
187
187
188 def divergent(self):
188 def divergent(self):
189 """Is a successors of a changeset with multiple possible successors set
189 """Is a successors of a changeset with multiple possible successors set
190
190
191 Only non-public and non-obsolete changesets may be divergent.
191 Only non-public and non-obsolete changesets may be divergent.
192 """
192 """
193 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
193 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
194
194
195 def troubled(self):
195 def troubled(self):
196 """True if the changeset is either unstable, bumped or divergent"""
196 """True if the changeset is either unstable, bumped or divergent"""
197 return self.unstable() or self.bumped() or self.divergent()
197 return self.unstable() or self.bumped() or self.divergent()
198
198
199 def troubles(self):
199 def troubles(self):
200 """return the list of troubles affecting this changesets.
200 """return the list of troubles affecting this changesets.
201
201
202 Troubles are returned as strings. possible values are:
202 Troubles are returned as strings. possible values are:
203 - unstable,
203 - unstable,
204 - bumped,
204 - bumped,
205 - divergent.
205 - divergent.
206 """
206 """
207 troubles = []
207 troubles = []
208 if self.unstable():
208 if self.unstable():
209 troubles.append('unstable')
209 troubles.append('unstable')
210 if self.bumped():
210 if self.bumped():
211 troubles.append('bumped')
211 troubles.append('bumped')
212 if self.divergent():
212 if self.divergent():
213 troubles.append('divergent')
213 troubles.append('divergent')
214 return troubles
214 return troubles
215
215
216 def parents(self):
216 def parents(self):
217 """return contexts for each parent changeset"""
217 """return contexts for each parent changeset"""
218 return self._parents
218 return self._parents
219
219
220 def p1(self):
220 def p1(self):
221 return self._parents[0]
221 return self._parents[0]
222
222
223 def p2(self):
223 def p2(self):
224 if len(self._parents) == 2:
224 if len(self._parents) == 2:
225 return self._parents[1]
225 return self._parents[1]
226 return changectx(self._repo, -1)
226 return changectx(self._repo, -1)
227
227
228 def _fileinfo(self, path):
228 def _fileinfo(self, path):
229 if '_manifest' in self.__dict__:
229 if '_manifest' in self.__dict__:
230 try:
230 try:
231 return self._manifest[path], self._manifest.flags(path)
231 return self._manifest[path], self._manifest.flags(path)
232 except KeyError:
232 except KeyError:
233 raise error.ManifestLookupError(self._node, path,
233 raise error.ManifestLookupError(self._node, path,
234 _('not found in manifest'))
234 _('not found in manifest'))
235 if '_manifestdelta' in self.__dict__ or path in self.files():
235 if '_manifestdelta' in self.__dict__ or path in self.files():
236 if path in self._manifestdelta:
236 if path in self._manifestdelta:
237 return (self._manifestdelta[path],
237 return (self._manifestdelta[path],
238 self._manifestdelta.flags(path))
238 self._manifestdelta.flags(path))
239 node, flag = self._repo.manifest.find(self._changeset[0], path)
239 node, flag = self._repo.manifest.find(self._changeset[0], path)
240 if not node:
240 if not node:
241 raise error.ManifestLookupError(self._node, path,
241 raise error.ManifestLookupError(self._node, path,
242 _('not found in manifest'))
242 _('not found in manifest'))
243
243
244 return node, flag
244 return node, flag
245
245
246 def filenode(self, path):
246 def filenode(self, path):
247 return self._fileinfo(path)[0]
247 return self._fileinfo(path)[0]
248
248
249 def flags(self, path):
249 def flags(self, path):
250 try:
250 try:
251 return self._fileinfo(path)[1]
251 return self._fileinfo(path)[1]
252 except error.LookupError:
252 except error.LookupError:
253 return ''
253 return ''
254
254
255 def sub(self, path):
255 def sub(self, path):
256 '''return a subrepo for the stored revision of path, never wdir()'''
256 '''return a subrepo for the stored revision of path, never wdir()'''
257 return subrepo.subrepo(self, path)
257 return subrepo.subrepo(self, path)
258
258
259 def nullsub(self, path, pctx):
259 def nullsub(self, path, pctx):
260 return subrepo.nullsubrepo(self, path, pctx)
260 return subrepo.nullsubrepo(self, path, pctx)
261
261
262 def workingsub(self, path):
262 def workingsub(self, path):
263 '''return a subrepo for the stored revision, or wdir if this is a wdir
263 '''return a subrepo for the stored revision, or wdir if this is a wdir
264 context.
264 context.
265 '''
265 '''
266 return subrepo.subrepo(self, path, allowwdir=True)
266 return subrepo.subrepo(self, path, allowwdir=True)
267
267
268 def match(self, pats=[], include=None, exclude=None, default='glob',
268 def match(self, pats=[], include=None, exclude=None, default='glob',
269 listsubrepos=False, badfn=None):
269 listsubrepos=False, badfn=None):
270 r = self._repo
270 r = self._repo
271 return matchmod.match(r.root, r.getcwd(), pats,
271 return matchmod.match(r.root, r.getcwd(), pats,
272 include, exclude, default,
272 include, exclude, default,
273 auditor=r.auditor, ctx=self,
273 auditor=r.auditor, ctx=self,
274 listsubrepos=listsubrepos, badfn=badfn)
274 listsubrepos=listsubrepos, badfn=badfn)
275
275
276 def diff(self, ctx2=None, match=None, **opts):
276 def diff(self, ctx2=None, match=None, **opts):
277 """Returns a diff generator for the given contexts and matcher"""
277 """Returns a diff generator for the given contexts and matcher"""
278 if ctx2 is None:
278 if ctx2 is None:
279 ctx2 = self.p1()
279 ctx2 = self.p1()
280 if ctx2 is not None:
280 if ctx2 is not None:
281 ctx2 = self._repo[ctx2]
281 ctx2 = self._repo[ctx2]
282 diffopts = patch.diffopts(self._repo.ui, opts)
282 diffopts = patch.diffopts(self._repo.ui, opts)
283 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
283 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
284
284
285 def dirs(self):
285 def dirs(self):
286 return self._manifest.dirs()
286 return self._manifest.dirs()
287
287
288 def hasdir(self, dir):
288 def hasdir(self, dir):
289 return self._manifest.hasdir(dir)
289 return self._manifest.hasdir(dir)
290
290
291 def dirty(self, missing=False, merge=True, branch=True):
291 def dirty(self, missing=False, merge=True, branch=True):
292 return False
292 return False
293
293
294 def status(self, other=None, match=None, listignored=False,
294 def status(self, other=None, match=None, listignored=False,
295 listclean=False, listunknown=False, listsubrepos=False):
295 listclean=False, listunknown=False, listsubrepos=False):
296 """return status of files between two nodes or node and working
296 """return status of files between two nodes or node and working
297 directory.
297 directory.
298
298
299 If other is None, compare this node with working directory.
299 If other is None, compare this node with working directory.
300
300
301 returns (modified, added, removed, deleted, unknown, ignored, clean)
301 returns (modified, added, removed, deleted, unknown, ignored, clean)
302 """
302 """
303
303
304 ctx1 = self
304 ctx1 = self
305 ctx2 = self._repo[other]
305 ctx2 = self._repo[other]
306
306
307 # This next code block is, admittedly, fragile logic that tests for
307 # This next code block is, admittedly, fragile logic that tests for
308 # reversing the contexts and wouldn't need to exist if it weren't for
308 # reversing the contexts and wouldn't need to exist if it weren't for
309 # the fast (and common) code path of comparing the working directory
309 # the fast (and common) code path of comparing the working directory
310 # with its first parent.
310 # with its first parent.
311 #
311 #
312 # What we're aiming for here is the ability to call:
312 # What we're aiming for here is the ability to call:
313 #
313 #
314 # workingctx.status(parentctx)
314 # workingctx.status(parentctx)
315 #
315 #
316 # If we always built the manifest for each context and compared those,
316 # If we always built the manifest for each context and compared those,
317 # then we'd be done. But the special case of the above call means we
317 # then we'd be done. But the special case of the above call means we
318 # just copy the manifest of the parent.
318 # just copy the manifest of the parent.
319 reversed = False
319 reversed = False
320 if (not isinstance(ctx1, changectx)
320 if (not isinstance(ctx1, changectx)
321 and isinstance(ctx2, changectx)):
321 and isinstance(ctx2, changectx)):
322 reversed = True
322 reversed = True
323 ctx1, ctx2 = ctx2, ctx1
323 ctx1, ctx2 = ctx2, ctx1
324
324
325 match = ctx2._matchstatus(ctx1, match)
325 match = ctx2._matchstatus(ctx1, match)
326 r = scmutil.status([], [], [], [], [], [], [])
326 r = scmutil.status([], [], [], [], [], [], [])
327 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
327 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
328 listunknown)
328 listunknown)
329
329
330 if reversed:
330 if reversed:
331 # Reverse added and removed. Clear deleted, unknown and ignored as
331 # Reverse added and removed. Clear deleted, unknown and ignored as
332 # these make no sense to reverse.
332 # these make no sense to reverse.
333 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
333 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
334 r.clean)
334 r.clean)
335
335
336 if listsubrepos:
336 if listsubrepos:
337 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
337 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
338 rev2 = ctx2.subrev(subpath)
338 rev2 = ctx2.subrev(subpath)
339 try:
339 try:
340 submatch = matchmod.narrowmatcher(subpath, match)
340 submatch = matchmod.narrowmatcher(subpath, match)
341 s = sub.status(rev2, match=submatch, ignored=listignored,
341 s = sub.status(rev2, match=submatch, ignored=listignored,
342 clean=listclean, unknown=listunknown,
342 clean=listclean, unknown=listunknown,
343 listsubrepos=True)
343 listsubrepos=True)
344 for rfiles, sfiles in zip(r, s):
344 for rfiles, sfiles in zip(r, s):
345 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
345 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
346 except error.LookupError:
346 except error.LookupError:
347 self._repo.ui.status(_("skipping missing "
347 self._repo.ui.status(_("skipping missing "
348 "subrepository: %s\n") % subpath)
348 "subrepository: %s\n") % subpath)
349
349
350 for l in r:
350 for l in r:
351 l.sort()
351 l.sort()
352
352
353 return r
353 return r
354
354
355
355
356 def makememctx(repo, parents, text, user, date, branch, files, store,
356 def makememctx(repo, parents, text, user, date, branch, files, store,
357 editor=None, extra=None):
357 editor=None, extra=None):
358 def getfilectx(repo, memctx, path):
358 def getfilectx(repo, memctx, path):
359 data, mode, copied = store.getfile(path)
359 data, mode, copied = store.getfile(path)
360 if data is None:
360 if data is None:
361 return None
361 return None
362 islink, isexec = mode
362 islink, isexec = mode
363 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
363 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
364 copied=copied, memctx=memctx)
364 copied=copied, memctx=memctx)
365 if extra is None:
365 if extra is None:
366 extra = {}
366 extra = {}
367 if branch:
367 if branch:
368 extra['branch'] = encoding.fromlocal(branch)
368 extra['branch'] = encoding.fromlocal(branch)
369 ctx = memctx(repo, parents, text, files, getfilectx, user,
369 ctx = memctx(repo, parents, text, files, getfilectx, user,
370 date, extra, editor)
370 date, extra, editor)
371 return ctx
371 return ctx
372
372
373 class changectx(basectx):
373 class changectx(basectx):
374 """A changecontext object makes access to data related to a particular
374 """A changecontext object makes access to data related to a particular
375 changeset convenient. It represents a read-only context already present in
375 changeset convenient. It represents a read-only context already present in
376 the repo."""
376 the repo."""
377 def __init__(self, repo, changeid=''):
377 def __init__(self, repo, changeid=''):
378 """changeid is a revision number, node, or tag"""
378 """changeid is a revision number, node, or tag"""
379
379
380 # since basectx.__new__ already took care of copying the object, we
380 # since basectx.__new__ already took care of copying the object, we
381 # don't need to do anything in __init__, so we just exit here
381 # don't need to do anything in __init__, so we just exit here
382 if isinstance(changeid, basectx):
382 if isinstance(changeid, basectx):
383 return
383 return
384
384
385 if changeid == '':
385 if changeid == '':
386 changeid = '.'
386 changeid = '.'
387 self._repo = repo
387 self._repo = repo
388
388
389 try:
389 try:
390 if isinstance(changeid, int):
390 if isinstance(changeid, int):
391 self._node = repo.changelog.node(changeid)
391 self._node = repo.changelog.node(changeid)
392 self._rev = changeid
392 self._rev = changeid
393 return
393 return
394 if isinstance(changeid, long):
394 if isinstance(changeid, long):
395 changeid = str(changeid)
395 changeid = str(changeid)
396 if changeid == 'null':
396 if changeid == 'null':
397 self._node = nullid
397 self._node = nullid
398 self._rev = nullrev
398 self._rev = nullrev
399 return
399 return
400 if changeid == 'tip':
400 if changeid == 'tip':
401 self._node = repo.changelog.tip()
401 self._node = repo.changelog.tip()
402 self._rev = repo.changelog.rev(self._node)
402 self._rev = repo.changelog.rev(self._node)
403 return
403 return
404 if changeid == '.' or changeid == repo.dirstate.p1():
404 if changeid == '.' or changeid == repo.dirstate.p1():
405 # this is a hack to delay/avoid loading obsmarkers
405 # this is a hack to delay/avoid loading obsmarkers
406 # when we know that '.' won't be hidden
406 # when we know that '.' won't be hidden
407 self._node = repo.dirstate.p1()
407 self._node = repo.dirstate.p1()
408 self._rev = repo.unfiltered().changelog.rev(self._node)
408 self._rev = repo.unfiltered().changelog.rev(self._node)
409 return
409 return
410 if len(changeid) == 20:
410 if len(changeid) == 20:
411 try:
411 try:
412 self._node = changeid
412 self._node = changeid
413 self._rev = repo.changelog.rev(changeid)
413 self._rev = repo.changelog.rev(changeid)
414 return
414 return
415 except error.FilteredRepoLookupError:
415 except error.FilteredRepoLookupError:
416 raise
416 raise
417 except LookupError:
417 except LookupError:
418 pass
418 pass
419
419
420 try:
420 try:
421 r = int(changeid)
421 r = int(changeid)
422 if str(r) != changeid:
422 if str(r) != changeid:
423 raise ValueError
423 raise ValueError
424 l = len(repo.changelog)
424 l = len(repo.changelog)
425 if r < 0:
425 if r < 0:
426 r += l
426 r += l
427 if r < 0 or r >= l:
427 if r < 0 or r >= l:
428 raise ValueError
428 raise ValueError
429 self._rev = r
429 self._rev = r
430 self._node = repo.changelog.node(r)
430 self._node = repo.changelog.node(r)
431 return
431 return
432 except error.FilteredIndexError:
432 except error.FilteredIndexError:
433 raise
433 raise
434 except (ValueError, OverflowError, IndexError):
434 except (ValueError, OverflowError, IndexError):
435 pass
435 pass
436
436
437 if len(changeid) == 40:
437 if len(changeid) == 40:
438 try:
438 try:
439 self._node = bin(changeid)
439 self._node = bin(changeid)
440 self._rev = repo.changelog.rev(self._node)
440 self._rev = repo.changelog.rev(self._node)
441 return
441 return
442 except error.FilteredLookupError:
442 except error.FilteredLookupError:
443 raise
443 raise
444 except (TypeError, LookupError):
444 except (TypeError, LookupError):
445 pass
445 pass
446
446
447 # lookup bookmarks through the name interface
447 # lookup bookmarks through the name interface
448 try:
448 try:
449 self._node = repo.names.singlenode(repo, changeid)
449 self._node = repo.names.singlenode(repo, changeid)
450 self._rev = repo.changelog.rev(self._node)
450 self._rev = repo.changelog.rev(self._node)
451 return
451 return
452 except KeyError:
452 except KeyError:
453 pass
453 pass
454 except error.FilteredRepoLookupError:
454 except error.FilteredRepoLookupError:
455 raise
455 raise
456 except error.RepoLookupError:
456 except error.RepoLookupError:
457 pass
457 pass
458
458
459 self._node = repo.unfiltered().changelog._partialmatch(changeid)
459 self._node = repo.unfiltered().changelog._partialmatch(changeid)
460 if self._node is not None:
460 if self._node is not None:
461 self._rev = repo.changelog.rev(self._node)
461 self._rev = repo.changelog.rev(self._node)
462 return
462 return
463
463
464 # lookup failed
464 # lookup failed
465 # check if it might have come from damaged dirstate
465 # check if it might have come from damaged dirstate
466 #
466 #
467 # XXX we could avoid the unfiltered if we had a recognizable
467 # XXX we could avoid the unfiltered if we had a recognizable
468 # exception for filtered changeset access
468 # exception for filtered changeset access
469 if changeid in repo.unfiltered().dirstate.parents():
469 if changeid in repo.unfiltered().dirstate.parents():
470 msg = _("working directory has unknown parent '%s'!")
470 msg = _("working directory has unknown parent '%s'!")
471 raise error.Abort(msg % short(changeid))
471 raise error.Abort(msg % short(changeid))
472 try:
472 try:
473 if len(changeid) == 20 and nonascii(changeid):
473 if len(changeid) == 20 and nonascii(changeid):
474 changeid = hex(changeid)
474 changeid = hex(changeid)
475 except TypeError:
475 except TypeError:
476 pass
476 pass
477 except (error.FilteredIndexError, error.FilteredLookupError,
477 except (error.FilteredIndexError, error.FilteredLookupError,
478 error.FilteredRepoLookupError):
478 error.FilteredRepoLookupError):
479 if repo.filtername.startswith('visible'):
479 if repo.filtername.startswith('visible'):
480 msg = _("hidden revision '%s'") % changeid
480 msg = _("hidden revision '%s'") % changeid
481 hint = _('use --hidden to access hidden revisions')
481 hint = _('use --hidden to access hidden revisions')
482 raise error.FilteredRepoLookupError(msg, hint=hint)
482 raise error.FilteredRepoLookupError(msg, hint=hint)
483 msg = _("filtered revision '%s' (not in '%s' subset)")
483 msg = _("filtered revision '%s' (not in '%s' subset)")
484 msg %= (changeid, repo.filtername)
484 msg %= (changeid, repo.filtername)
485 raise error.FilteredRepoLookupError(msg)
485 raise error.FilteredRepoLookupError(msg)
486 except IndexError:
486 except IndexError:
487 pass
487 pass
488 raise error.RepoLookupError(
488 raise error.RepoLookupError(
489 _("unknown revision '%s'") % changeid)
489 _("unknown revision '%s'") % changeid)
490
490
491 def __hash__(self):
491 def __hash__(self):
492 try:
492 try:
493 return hash(self._rev)
493 return hash(self._rev)
494 except AttributeError:
494 except AttributeError:
495 return id(self)
495 return id(self)
496
496
497 def __nonzero__(self):
497 def __nonzero__(self):
498 return self._rev != nullrev
498 return self._rev != nullrev
499
499
500 @propertycache
500 @propertycache
501 def _changeset(self):
501 def _changeset(self):
502 return self._repo.changelog.read(self.rev())
502 return self._repo.changelog.read(self.rev())
503
503
504 @propertycache
504 @propertycache
505 def _manifest(self):
505 def _manifest(self):
506 return self._repo.manifest.read(self._changeset[0])
506 return self._repo.manifest.read(self._changeset[0])
507
507
508 @propertycache
508 @propertycache
509 def _manifestdelta(self):
509 def _manifestdelta(self):
510 return self._repo.manifest.readdelta(self._changeset[0])
510 return self._repo.manifest.readdelta(self._changeset[0])
511
511
512 @propertycache
512 @propertycache
513 def _parents(self):
513 def _parents(self):
514 - p = self._repo.changelog.parentrevs(self._rev)
515 - if p[1] == nullrev:
516 - p = p[:-1]
517 - return [changectx(self._repo, x) for x in p]
514 + repo = self._repo
515 + p1, p2 = repo.changelog.parentrevs(self._rev)
516 + if p2 == nullrev:
517 + return [changectx(repo, p1)]
518 + return [changectx(repo, p1), changectx(repo, p2)]
518
519
519 def changeset(self):
520 def changeset(self):
520 return self._changeset
521 return self._changeset
521 def manifestnode(self):
522 def manifestnode(self):
522 return self._changeset[0]
523 return self._changeset[0]
523
524
524 def user(self):
525 def user(self):
525 return self._changeset[1]
526 return self._changeset[1]
526 def date(self):
527 def date(self):
527 return self._changeset[2]
528 return self._changeset[2]
528 def files(self):
529 def files(self):
529 return self._changeset[3]
530 return self._changeset[3]
530 def description(self):
531 def description(self):
531 return self._changeset[4]
532 return self._changeset[4]
532 def branch(self):
533 def branch(self):
533 return encoding.tolocal(self._changeset[5].get("branch"))
534 return encoding.tolocal(self._changeset[5].get("branch"))
534 def closesbranch(self):
535 def closesbranch(self):
535 return 'close' in self._changeset[5]
536 return 'close' in self._changeset[5]
536 def extra(self):
537 def extra(self):
537 return self._changeset[5]
538 return self._changeset[5]
538 def tags(self):
539 def tags(self):
539 return self._repo.nodetags(self._node)
540 return self._repo.nodetags(self._node)
540 def bookmarks(self):
541 def bookmarks(self):
541 return self._repo.nodebookmarks(self._node)
542 return self._repo.nodebookmarks(self._node)
542 def phase(self):
543 def phase(self):
543 return self._repo._phasecache.phase(self._repo, self._rev)
544 return self._repo._phasecache.phase(self._repo, self._rev)
544 def hidden(self):
545 def hidden(self):
545 return self._rev in repoview.filterrevs(self._repo, 'visible')
546 return self._rev in repoview.filterrevs(self._repo, 'visible')
546
547
547 def children(self):
548 def children(self):
548 """return contexts for each child changeset"""
549 """return contexts for each child changeset"""
549 c = self._repo.changelog.children(self._node)
550 c = self._repo.changelog.children(self._node)
550 return [changectx(self._repo, x) for x in c]
551 return [changectx(self._repo, x) for x in c]
551
552
552 def ancestors(self):
553 def ancestors(self):
553 for a in self._repo.changelog.ancestors([self._rev]):
554 for a in self._repo.changelog.ancestors([self._rev]):
554 yield changectx(self._repo, a)
555 yield changectx(self._repo, a)
555
556
556 def descendants(self):
557 def descendants(self):
557 for d in self._repo.changelog.descendants([self._rev]):
558 for d in self._repo.changelog.descendants([self._rev]):
558 yield changectx(self._repo, d)
559 yield changectx(self._repo, d)
559
560
560 def filectx(self, path, fileid=None, filelog=None):
561 def filectx(self, path, fileid=None, filelog=None):
561 """get a file context from this changeset"""
562 """get a file context from this changeset"""
562 if fileid is None:
563 if fileid is None:
563 fileid = self.filenode(path)
564 fileid = self.filenode(path)
564 return filectx(self._repo, path, fileid=fileid,
565 return filectx(self._repo, path, fileid=fileid,
565 changectx=self, filelog=filelog)
566 changectx=self, filelog=filelog)
566
567
567 def ancestor(self, c2, warn=False):
568 def ancestor(self, c2, warn=False):
568 """return the "best" ancestor context of self and c2
569 """return the "best" ancestor context of self and c2
569
570
570 If there are multiple candidates, it will show a message and check
571 If there are multiple candidates, it will show a message and check
571 merge.preferancestor configuration before falling back to the
572 merge.preferancestor configuration before falling back to the
572 revlog ancestor."""
573 revlog ancestor."""
573 # deal with workingctxs
574 # deal with workingctxs
574 n2 = c2._node
575 n2 = c2._node
575 if n2 is None:
576 if n2 is None:
576 n2 = c2._parents[0]._node
577 n2 = c2._parents[0]._node
577 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
578 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
578 if not cahs:
579 if not cahs:
579 anc = nullid
580 anc = nullid
580 elif len(cahs) == 1:
581 elif len(cahs) == 1:
581 anc = cahs[0]
582 anc = cahs[0]
582 else:
583 else:
583 # experimental config: merge.preferancestor
584 # experimental config: merge.preferancestor
584 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
585 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
585 try:
586 try:
586 ctx = changectx(self._repo, r)
587 ctx = changectx(self._repo, r)
587 except error.RepoLookupError:
588 except error.RepoLookupError:
588 continue
589 continue
589 anc = ctx.node()
590 anc = ctx.node()
590 if anc in cahs:
591 if anc in cahs:
591 break
592 break
592 else:
593 else:
593 anc = self._repo.changelog.ancestor(self._node, n2)
594 anc = self._repo.changelog.ancestor(self._node, n2)
594 if warn:
595 if warn:
595 self._repo.ui.status(
596 self._repo.ui.status(
596 (_("note: using %s as ancestor of %s and %s\n") %
597 (_("note: using %s as ancestor of %s and %s\n") %
597 (short(anc), short(self._node), short(n2))) +
598 (short(anc), short(self._node), short(n2))) +
598 ''.join(_(" alternatively, use --config "
599 ''.join(_(" alternatively, use --config "
599 "merge.preferancestor=%s\n") %
600 "merge.preferancestor=%s\n") %
600 short(n) for n in sorted(cahs) if n != anc))
601 short(n) for n in sorted(cahs) if n != anc))
601 return changectx(self._repo, anc)
602 return changectx(self._repo, anc)
602
603
603 def descendant(self, other):
604 def descendant(self, other):
604 """True if other is descendant of this changeset"""
605 """True if other is descendant of this changeset"""
605 return self._repo.changelog.descendant(self._rev, other._rev)
606 return self._repo.changelog.descendant(self._rev, other._rev)
606
607
607 def walk(self, match):
608 def walk(self, match):
608 '''Generates matching file names.'''
609 '''Generates matching file names.'''
609
610
610 # Wrap match.bad method to have message with nodeid
611 # Wrap match.bad method to have message with nodeid
611 def bad(fn, msg):
612 def bad(fn, msg):
612 # The manifest doesn't know about subrepos, so don't complain about
613 # The manifest doesn't know about subrepos, so don't complain about
613 # paths into valid subrepos.
614 # paths into valid subrepos.
614 if any(fn == s or fn.startswith(s + '/')
615 if any(fn == s or fn.startswith(s + '/')
615 for s in self.substate):
616 for s in self.substate):
616 return
617 return
617 match.bad(fn, _('no such file in rev %s') % self)
618 match.bad(fn, _('no such file in rev %s') % self)
618
619
619 m = matchmod.badmatch(match, bad)
620 m = matchmod.badmatch(match, bad)
620 return self._manifest.walk(m)
621 return self._manifest.walk(m)
621
622
622 def matches(self, match):
623 def matches(self, match):
623 return self.walk(match)
624 return self.walk(match)
624
625
625 class basefilectx(object):
626 class basefilectx(object):
626 """A filecontext object represents the common logic for its children:
627 """A filecontext object represents the common logic for its children:
627 filectx: read-only access to a filerevision that is already present
628 filectx: read-only access to a filerevision that is already present
628 in the repo,
629 in the repo,
629 workingfilectx: a filecontext that represents files from the working
630 workingfilectx: a filecontext that represents files from the working
630 directory,
631 directory,
631 memfilectx: a filecontext that represents files in-memory."""
632 memfilectx: a filecontext that represents files in-memory."""
632 def __new__(cls, repo, path, *args, **kwargs):
633 def __new__(cls, repo, path, *args, **kwargs):
633 return super(basefilectx, cls).__new__(cls)
634 return super(basefilectx, cls).__new__(cls)
634
635
635 @propertycache
636 @propertycache
636 def _filelog(self):
637 def _filelog(self):
637 return self._repo.file(self._path)
638 return self._repo.file(self._path)
638
639
639 @propertycache
640 @propertycache
640 def _changeid(self):
641 def _changeid(self):
641 if '_changeid' in self.__dict__:
642 if '_changeid' in self.__dict__:
642 return self._changeid
643 return self._changeid
643 elif '_changectx' in self.__dict__:
644 elif '_changectx' in self.__dict__:
644 return self._changectx.rev()
645 return self._changectx.rev()
645 elif '_descendantrev' in self.__dict__:
646 elif '_descendantrev' in self.__dict__:
646 # this file context was created from a revision with a known
647 # this file context was created from a revision with a known
647 # descendant, we can (lazily) correct for linkrev aliases
648 # descendant, we can (lazily) correct for linkrev aliases
648 return self._adjustlinkrev(self._path, self._filelog,
649 return self._adjustlinkrev(self._path, self._filelog,
649 self._filenode, self._descendantrev)
650 self._filenode, self._descendantrev)
650 else:
651 else:
651 return self._filelog.linkrev(self._filerev)
652 return self._filelog.linkrev(self._filerev)
652
653
653 @propertycache
654 @propertycache
654 def _filenode(self):
655 def _filenode(self):
655 if '_fileid' in self.__dict__:
656 if '_fileid' in self.__dict__:
656 return self._filelog.lookup(self._fileid)
657 return self._filelog.lookup(self._fileid)
657 else:
658 else:
658 return self._changectx.filenode(self._path)
659 return self._changectx.filenode(self._path)
659
660
660 @propertycache
661 @propertycache
661 def _filerev(self):
662 def _filerev(self):
662 return self._filelog.rev(self._filenode)
663 return self._filelog.rev(self._filenode)
663
664
664 @propertycache
665 @propertycache
665 def _repopath(self):
666 def _repopath(self):
666 return self._path
667 return self._path
667
668
668 def __nonzero__(self):
669 def __nonzero__(self):
669 try:
670 try:
670 self._filenode
671 self._filenode
671 return True
672 return True
672 except error.LookupError:
673 except error.LookupError:
673 # file is missing
674 # file is missing
674 return False
675 return False
675
676
676 def __str__(self):
677 def __str__(self):
677 return "%s@%s" % (self.path(), self._changectx)
678 return "%s@%s" % (self.path(), self._changectx)
678
679
679 def __repr__(self):
680 def __repr__(self):
680 return "<%s %s>" % (type(self).__name__, str(self))
681 return "<%s %s>" % (type(self).__name__, str(self))
681
682
682 def __hash__(self):
683 def __hash__(self):
683 try:
684 try:
684 return hash((self._path, self._filenode))
685 return hash((self._path, self._filenode))
685 except AttributeError:
686 except AttributeError:
686 return id(self)
687 return id(self)
687
688
688 def __eq__(self, other):
689 def __eq__(self, other):
689 try:
690 try:
690 return (type(self) == type(other) and self._path == other._path
691 return (type(self) == type(other) and self._path == other._path
691 and self._filenode == other._filenode)
692 and self._filenode == other._filenode)
692 except AttributeError:
693 except AttributeError:
693 return False
694 return False
694
695
695 def __ne__(self, other):
696 def __ne__(self, other):
696 return not (self == other)
697 return not (self == other)
697
698
698 def filerev(self):
699 def filerev(self):
699 return self._filerev
700 return self._filerev
700 def filenode(self):
701 def filenode(self):
701 return self._filenode
702 return self._filenode
702 def flags(self):
703 def flags(self):
703 return self._changectx.flags(self._path)
704 return self._changectx.flags(self._path)
704 def filelog(self):
705 def filelog(self):
705 return self._filelog
706 return self._filelog
706 def rev(self):
707 def rev(self):
707 return self._changeid
708 return self._changeid
708 def linkrev(self):
709 def linkrev(self):
709 return self._filelog.linkrev(self._filerev)
710 return self._filelog.linkrev(self._filerev)
710 def node(self):
711 def node(self):
711 return self._changectx.node()
712 return self._changectx.node()
712 def hex(self):
713 def hex(self):
713 return self._changectx.hex()
714 return self._changectx.hex()
714 def user(self):
715 def user(self):
715 return self._changectx.user()
716 return self._changectx.user()
716 def date(self):
717 def date(self):
717 return self._changectx.date()
718 return self._changectx.date()
718 def files(self):
719 def files(self):
719 return self._changectx.files()
720 return self._changectx.files()
720 def description(self):
721 def description(self):
721 return self._changectx.description()
722 return self._changectx.description()
722 def branch(self):
723 def branch(self):
723 return self._changectx.branch()
724 return self._changectx.branch()
724 def extra(self):
725 def extra(self):
725 return self._changectx.extra()
726 return self._changectx.extra()
726 def phase(self):
727 def phase(self):
727 return self._changectx.phase()
728 return self._changectx.phase()
728 def phasestr(self):
729 def phasestr(self):
729 return self._changectx.phasestr()
730 return self._changectx.phasestr()
730 def manifest(self):
731 def manifest(self):
731 return self._changectx.manifest()
732 return self._changectx.manifest()
732 def changectx(self):
733 def changectx(self):
733 return self._changectx
734 return self._changectx
734 def repo(self):
735 def repo(self):
735 return self._repo
736 return self._repo
736
737
737 def path(self):
738 def path(self):
738 return self._path
739 return self._path
739
740
740 def isbinary(self):
741 def isbinary(self):
741 try:
742 try:
742 return util.binary(self.data())
743 return util.binary(self.data())
743 except IOError:
744 except IOError:
744 return False
745 return False
745 def isexec(self):
746 def isexec(self):
746 return 'x' in self.flags()
747 return 'x' in self.flags()
747 def islink(self):
748 def islink(self):
748 return 'l' in self.flags()
749 return 'l' in self.flags()
749
750
750 def isabsent(self):
751 def isabsent(self):
751 """whether this filectx represents a file not in self._changectx
752 """whether this filectx represents a file not in self._changectx
752
753
753 This is mainly for merge code to detect change/delete conflicts. This is
754 This is mainly for merge code to detect change/delete conflicts. This is
754 expected to be True for all subclasses of basectx."""
755 expected to be True for all subclasses of basectx."""
755 return False
756 return False
756
757
757 _customcmp = False
758 _customcmp = False
758 def cmp(self, fctx):
759 def cmp(self, fctx):
759 """compare with other file context
760 """compare with other file context
760
761
761 returns True if different than fctx.
762 returns True if different than fctx.
762 """
763 """
763 if fctx._customcmp:
764 if fctx._customcmp:
764 return fctx.cmp(self)
765 return fctx.cmp(self)
765
766
766 if (fctx._filerev is None
767 if (fctx._filerev is None
767 and (self._repo._encodefilterpats
768 and (self._repo._encodefilterpats
768 # if file data starts with '\1\n', empty metadata block is
769 # if file data starts with '\1\n', empty metadata block is
769 # prepended, which adds 4 bytes to filelog.size().
770 # prepended, which adds 4 bytes to filelog.size().
770 or self.size() - 4 == fctx.size())
771 or self.size() - 4 == fctx.size())
771 or self.size() == fctx.size()):
772 or self.size() == fctx.size()):
772 return self._filelog.cmp(self._filenode, fctx.data())
773 return self._filelog.cmp(self._filenode, fctx.data())
773
774
774 return True
775 return True
775
776
776 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
777 def _adjustlinkrev(self, path, filelog, fnode, srcrev, inclusive=False):
777 """return the first ancestor of <srcrev> introducing <fnode>
778 """return the first ancestor of <srcrev> introducing <fnode>
778
779
779 If the linkrev of the file revision does not point to an ancestor of
780 If the linkrev of the file revision does not point to an ancestor of
780 srcrev, we'll walk down the ancestors until we find one introducing
781 srcrev, we'll walk down the ancestors until we find one introducing
781 this file revision.
782 this file revision.
782
783
783 :repo: a localrepository object (used to access changelog and manifest)
784 :repo: a localrepository object (used to access changelog and manifest)
784 :path: the file path
785 :path: the file path
785 :fnode: the nodeid of the file revision
786 :fnode: the nodeid of the file revision
786 :filelog: the filelog of this path
787 :filelog: the filelog of this path
787 :srcrev: the changeset revision we search ancestors from
788 :srcrev: the changeset revision we search ancestors from
788 :inclusive: if true, the src revision will also be checked
789 :inclusive: if true, the src revision will also be checked
789 """
790 """
790 repo = self._repo
791 repo = self._repo
791 cl = repo.unfiltered().changelog
792 cl = repo.unfiltered().changelog
792 ma = repo.manifest
793 ma = repo.manifest
793 # fetch the linkrev
794 # fetch the linkrev
794 fr = filelog.rev(fnode)
795 fr = filelog.rev(fnode)
795 lkr = filelog.linkrev(fr)
796 lkr = filelog.linkrev(fr)
796 # hack to reuse ancestor computation when searching for renames
797 # hack to reuse ancestor computation when searching for renames
797 memberanc = getattr(self, '_ancestrycontext', None)
798 memberanc = getattr(self, '_ancestrycontext', None)
798 iteranc = None
799 iteranc = None
799 if srcrev is None:
800 if srcrev is None:
800 # wctx case, used by workingfilectx during mergecopy
801 # wctx case, used by workingfilectx during mergecopy
801 revs = [p.rev() for p in self._repo[None].parents()]
802 revs = [p.rev() for p in self._repo[None].parents()]
802 inclusive = True # we skipped the real (revless) source
803 inclusive = True # we skipped the real (revless) source
803 else:
804 else:
804 revs = [srcrev]
805 revs = [srcrev]
805 if memberanc is None:
806 if memberanc is None:
806 memberanc = iteranc = cl.ancestors(revs, lkr,
807 memberanc = iteranc = cl.ancestors(revs, lkr,
807 inclusive=inclusive)
808 inclusive=inclusive)
808 # check if this linkrev is an ancestor of srcrev
809 # check if this linkrev is an ancestor of srcrev
809 if lkr not in memberanc:
810 if lkr not in memberanc:
810 if iteranc is None:
811 if iteranc is None:
811 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
812 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
812 for a in iteranc:
813 for a in iteranc:
813 ac = cl.read(a) # get changeset data (we avoid object creation)
814 ac = cl.read(a) # get changeset data (we avoid object creation)
814 if path in ac[3]: # checking the 'files' field.
815 if path in ac[3]: # checking the 'files' field.
815 # The file has been touched, check if the content is
816 # The file has been touched, check if the content is
816 # similar to the one we search for.
817 # similar to the one we search for.
817 if fnode == ma.readfast(ac[0]).get(path):
818 if fnode == ma.readfast(ac[0]).get(path):
818 return a
819 return a
819 # In theory, we should never get out of that loop without a result.
820 # In theory, we should never get out of that loop without a result.
820 # But if manifest uses a buggy file revision (not children of the
821 # But if manifest uses a buggy file revision (not children of the
821 # one it replaces) we could. Such a buggy situation will likely
822 # one it replaces) we could. Such a buggy situation will likely
822 # result is crash somewhere else at to some point.
823 # result is crash somewhere else at to some point.
823 return lkr
824 return lkr
824
825
825 def introrev(self):
826 def introrev(self):
826 """return the rev of the changeset which introduced this file revision
827 """return the rev of the changeset which introduced this file revision
827
828
828 This method is different from linkrev because it take into account the
829 This method is different from linkrev because it take into account the
829 changeset the filectx was created from. It ensures the returned
830 changeset the filectx was created from. It ensures the returned
830 revision is one of its ancestors. This prevents bugs from
831 revision is one of its ancestors. This prevents bugs from
831 'linkrev-shadowing' when a file revision is used by multiple
832 'linkrev-shadowing' when a file revision is used by multiple
832 changesets.
833 changesets.
833 """
834 """
834 lkr = self.linkrev()
835 lkr = self.linkrev()
835 attrs = vars(self)
836 attrs = vars(self)
836 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
837 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
837 if noctx or self.rev() == lkr:
838 if noctx or self.rev() == lkr:
838 return self.linkrev()
839 return self.linkrev()
839 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
840 return self._adjustlinkrev(self._path, self._filelog, self._filenode,
840 self.rev(), inclusive=True)
841 self.rev(), inclusive=True)
841
842
842 def _parentfilectx(self, path, fileid, filelog):
843 def _parentfilectx(self, path, fileid, filelog):
843 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
844 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
844 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
845 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
845 if '_changeid' in vars(self) or '_changectx' in vars(self):
846 if '_changeid' in vars(self) or '_changectx' in vars(self):
846 # If self is associated with a changeset (probably explicitly
847 # If self is associated with a changeset (probably explicitly
847 # fed), ensure the created filectx is associated with a
848 # fed), ensure the created filectx is associated with a
848 # changeset that is an ancestor of self.changectx.
849 # changeset that is an ancestor of self.changectx.
849 # This lets us later use _adjustlinkrev to get a correct link.
850 # This lets us later use _adjustlinkrev to get a correct link.
850 fctx._descendantrev = self.rev()
851 fctx._descendantrev = self.rev()
851 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
852 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
852 elif '_descendantrev' in vars(self):
853 elif '_descendantrev' in vars(self):
853 # Otherwise propagate _descendantrev if we have one associated.
854 # Otherwise propagate _descendantrev if we have one associated.
854 fctx._descendantrev = self._descendantrev
855 fctx._descendantrev = self._descendantrev
855 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
856 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
856 return fctx
857 return fctx
857
858
858 def parents(self):
859 def parents(self):
859 _path = self._path
860 _path = self._path
860 fl = self._filelog
861 fl = self._filelog
861 parents = self._filelog.parents(self._filenode)
862 parents = self._filelog.parents(self._filenode)
862 pl = [(_path, node, fl) for node in parents if node != nullid]
863 pl = [(_path, node, fl) for node in parents if node != nullid]
863
864
864 r = fl.renamed(self._filenode)
865 r = fl.renamed(self._filenode)
865 if r:
866 if r:
866 # - In the simple rename case, both parent are nullid, pl is empty.
867 # - In the simple rename case, both parent are nullid, pl is empty.
867 # - In case of merge, only one of the parent is null id and should
868 # - In case of merge, only one of the parent is null id and should
868 # be replaced with the rename information. This parent is -always-
869 # be replaced with the rename information. This parent is -always-
869 # the first one.
870 # the first one.
870 #
871 #
871 # As null id have always been filtered out in the previous list
872 # As null id have always been filtered out in the previous list
872 # comprehension, inserting to 0 will always result in "replacing
873 # comprehension, inserting to 0 will always result in "replacing
873 # first nullid parent with rename information.
874 # first nullid parent with rename information.
874 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
875 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
875
876
876 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
877 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
877
878
878 def p1(self):
879 def p1(self):
879 return self.parents()[0]
880 return self.parents()[0]
880
881
881 def p2(self):
882 def p2(self):
882 p = self.parents()
883 p = self.parents()
883 if len(p) == 2:
884 if len(p) == 2:
884 return p[1]
885 return p[1]
885 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
886 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
886
887
887 def annotate(self, follow=False, linenumber=None, diffopts=None):
888 def annotate(self, follow=False, linenumber=None, diffopts=None):
        '''returns a list of tuples of (ctx, line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed.

        If the "linenumber" parameter is not None, tuples of
        ((ctx, linenumber), line) are returned instead, where linenumber
        is the line's number at its first appearance in the managed file.

        To reduce annotation cost, a fixed value (False) is used as the
        linenumber if the "linenumber" parameter is False.'''
898
899
899 if linenumber is None:
900 if linenumber is None:
900 def decorate(text, rev):
901 def decorate(text, rev):
901 return ([rev] * len(text.splitlines()), text)
902 return ([rev] * len(text.splitlines()), text)
902 elif linenumber:
903 elif linenumber:
903 def decorate(text, rev):
904 def decorate(text, rev):
904 size = len(text.splitlines())
905 size = len(text.splitlines())
905 return ([(rev, i) for i in xrange(1, size + 1)], text)
906 return ([(rev, i) for i in xrange(1, size + 1)], text)
906 else:
907 else:
907 def decorate(text, rev):
908 def decorate(text, rev):
908 return ([(rev, False)] * len(text.splitlines()), text)
909 return ([(rev, False)] * len(text.splitlines()), text)
909
910
910 def pair(parent, child):
911 def pair(parent, child):
911 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
912 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts,
912 refine=True)
913 refine=True)
913 for (a1, a2, b1, b2), t in blocks:
914 for (a1, a2, b1, b2), t in blocks:
914 # Changed blocks ('!') or blocks made only of blank lines ('~')
915 # Changed blocks ('!') or blocks made only of blank lines ('~')
915 # belong to the child.
916 # belong to the child.
916 if t == '=':
917 if t == '=':
917 child[0][b1:b2] = parent[0][a1:a2]
918 child[0][b1:b2] = parent[0][a1:a2]
918 return child
919 return child
919
920
920 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
921 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
921
922
922 def parents(f):
923 def parents(f):
923 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
924 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
924 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
925 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
925 # from the topmost introrev (= srcrev) down to p.linkrev() if it
926 # from the topmost introrev (= srcrev) down to p.linkrev() if it
926 # isn't an ancestor of the srcrev.
927 # isn't an ancestor of the srcrev.
927 f._changeid
928 f._changeid
928 pl = f.parents()
929 pl = f.parents()
929
930
930 # Don't return renamed parents if we aren't following.
931 # Don't return renamed parents if we aren't following.
931 if not follow:
932 if not follow:
932 pl = [p for p in pl if p.path() == f.path()]
933 pl = [p for p in pl if p.path() == f.path()]
933
934
934 # renamed filectx won't have a filelog yet, so set it
935 # renamed filectx won't have a filelog yet, so set it
935 # from the cache to save time
936 # from the cache to save time
936 for p in pl:
937 for p in pl:
937 if not '_filelog' in p.__dict__:
938 if not '_filelog' in p.__dict__:
938 p._filelog = getlog(p.path())
939 p._filelog = getlog(p.path())
939
940
940 return pl
941 return pl
941
942
942 # use linkrev to find the first changeset where self appeared
943 # use linkrev to find the first changeset where self appeared
943 base = self
944 base = self
944 introrev = self.introrev()
945 introrev = self.introrev()
945 if self.rev() != introrev:
946 if self.rev() != introrev:
946 base = self.filectx(self.filenode(), changeid=introrev)
947 base = self.filectx(self.filenode(), changeid=introrev)
947 if getattr(base, '_ancestrycontext', None) is None:
948 if getattr(base, '_ancestrycontext', None) is None:
948 cl = self._repo.changelog
949 cl = self._repo.changelog
949 if introrev is None:
950 if introrev is None:
950 # wctx is not inclusive, but works because _ancestrycontext
951 # wctx is not inclusive, but works because _ancestrycontext
951 # is used to test filelog revisions
952 # is used to test filelog revisions
952 ac = cl.ancestors([p.rev() for p in base.parents()],
953 ac = cl.ancestors([p.rev() for p in base.parents()],
953 inclusive=True)
954 inclusive=True)
954 else:
955 else:
955 ac = cl.ancestors([introrev], inclusive=True)
956 ac = cl.ancestors([introrev], inclusive=True)
956 base._ancestrycontext = ac
957 base._ancestrycontext = ac
957
958
958 # This algorithm would prefer to be recursive, but Python is a
959 # This algorithm would prefer to be recursive, but Python is a
959 # bit recursion-hostile. Instead we do an iterative
960 # bit recursion-hostile. Instead we do an iterative
960 # depth-first search.
961 # depth-first search.
961
962
962 visit = [base]
963 visit = [base]
963 hist = {}
964 hist = {}
964 pcache = {}
965 pcache = {}
965 needed = {base: 1}
966 needed = {base: 1}
966 while visit:
967 while visit:
967 f = visit[-1]
968 f = visit[-1]
968 pcached = f in pcache
969 pcached = f in pcache
969 if not pcached:
970 if not pcached:
970 pcache[f] = parents(f)
971 pcache[f] = parents(f)
971
972
972 ready = True
973 ready = True
973 pl = pcache[f]
974 pl = pcache[f]
974 for p in pl:
975 for p in pl:
975 if p not in hist:
976 if p not in hist:
976 ready = False
977 ready = False
977 visit.append(p)
978 visit.append(p)
978 if not pcached:
979 if not pcached:
979 needed[p] = needed.get(p, 0) + 1
980 needed[p] = needed.get(p, 0) + 1
980 if ready:
981 if ready:
981 visit.pop()
982 visit.pop()
982 reusable = f in hist
983 reusable = f in hist
983 if reusable:
984 if reusable:
984 curr = hist[f]
985 curr = hist[f]
985 else:
986 else:
986 curr = decorate(f.data(), f)
987 curr = decorate(f.data(), f)
987 for p in pl:
988 for p in pl:
988 if not reusable:
989 if not reusable:
989 curr = pair(hist[p], curr)
990 curr = pair(hist[p], curr)
990 if needed[p] == 1:
991 if needed[p] == 1:
991 del hist[p]
992 del hist[p]
992 del needed[p]
993 del needed[p]
993 else:
994 else:
994 needed[p] -= 1
995 needed[p] -= 1
995
996
996 hist[f] = curr
997 hist[f] = curr
997 pcache[f] = []
998 pcache[f] = []
998
999
999 return zip(hist[base][0], hist[base][1].splitlines(True))
1000 return zip(hist[base][0], hist[base][1].splitlines(True))
1000
1001
1001 def ancestors(self, followfirst=False):
1002 def ancestors(self, followfirst=False):
1002 visit = {}
1003 visit = {}
1003 c = self
1004 c = self
1004 if followfirst:
1005 if followfirst:
1005 cut = 1
1006 cut = 1
1006 else:
1007 else:
1007 cut = None
1008 cut = None
1008
1009
1009 while True:
1010 while True:
1010 for parent in c.parents()[:cut]:
1011 for parent in c.parents()[:cut]:
1011 visit[(parent.linkrev(), parent.filenode())] = parent
1012 visit[(parent.linkrev(), parent.filenode())] = parent
1012 if not visit:
1013 if not visit:
1013 break
1014 break
1014 c = visit.pop(max(visit))
1015 c = visit.pop(max(visit))
1015 yield c
1016 yield c
1016
1017
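def _example_file_history(repo, path):
    # Minimal illustrative sketch of driving the basefilectx API above,
    # assuming `repo` is a localrepo and `path` is a file tracked in the
    # '.' revision (both names are assumptions made for this example).
    fctx = repo['.'][path]

    # parents() follows copies/renames, so this walk can cross rename
    # boundaries; each visited context knows its own path and revision.
    history = []
    visited = set()
    visit = [fctx]
    while visit:
        f = visit.pop()
        key = (f.path(), f.filenode())
        if key in visited:
            continue
        visited.add(key)
        history.append((f.path(), f.rev()))
        visit.extend(f.parents())

    # Per the annotate() docstring above, annotate(linenumber=True) yields
    # ((filectx, linenumber), line) pairs.
    blame = [(c.rev(), lineno)
             for (c, lineno), line in fctx.annotate(linenumber=True)]
    return history, blame
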
1017 class filectx(basefilectx):
1018 class filectx(basefilectx):
1018 """A filecontext object makes access to data related to a particular
1019 """A filecontext object makes access to data related to a particular
1019 filerevision convenient."""
1020 filerevision convenient."""
1020 def __init__(self, repo, path, changeid=None, fileid=None,
1021 def __init__(self, repo, path, changeid=None, fileid=None,
1021 filelog=None, changectx=None):
1022 filelog=None, changectx=None):
1022 """changeid can be a changeset revision, node, or tag.
1023 """changeid can be a changeset revision, node, or tag.
1023 fileid can be a file revision or node."""
1024 fileid can be a file revision or node."""
1024 self._repo = repo
1025 self._repo = repo
1025 self._path = path
1026 self._path = path
1026
1027
1027 assert (changeid is not None
1028 assert (changeid is not None
1028 or fileid is not None
1029 or fileid is not None
1029 or changectx is not None), \
1030 or changectx is not None), \
1030 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1031 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1031 % (changeid, fileid, changectx))
1032 % (changeid, fileid, changectx))
1032
1033
1033 if filelog is not None:
1034 if filelog is not None:
1034 self._filelog = filelog
1035 self._filelog = filelog
1035
1036
1036 if changeid is not None:
1037 if changeid is not None:
1037 self._changeid = changeid
1038 self._changeid = changeid
1038 if changectx is not None:
1039 if changectx is not None:
1039 self._changectx = changectx
1040 self._changectx = changectx
1040 if fileid is not None:
1041 if fileid is not None:
1041 self._fileid = fileid
1042 self._fileid = fileid
1042
1043
1043 @propertycache
1044 @propertycache
1044 def _changectx(self):
1045 def _changectx(self):
1045 try:
1046 try:
1046 return changectx(self._repo, self._changeid)
1047 return changectx(self._repo, self._changeid)
1047 except error.FilteredRepoLookupError:
1048 except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered, this may lead to `filectx` trying to
            # build a `changectx` for a filtered revision. In that case we
            # fall back to creating the `changectx` on the unfiltered version
            # of the repository. This fallback should not be an issue because
            # `changectx` objects obtained from a `filectx` are not used in
            # complex operations that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However,
            # the behavior was not correct before filtering either, and
            # "incorrect behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious problems with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when fixing the linkrev issues is on the table.
1064 return changectx(self._repo.unfiltered(), self._changeid)
1065 return changectx(self._repo.unfiltered(), self._changeid)
1065
1066
1066 def filectx(self, fileid, changeid=None):
1067 def filectx(self, fileid, changeid=None):
1067 '''opens an arbitrary revision of the file without
1068 '''opens an arbitrary revision of the file without
1068 opening a new filelog'''
1069 opening a new filelog'''
1069 return filectx(self._repo, self._path, fileid=fileid,
1070 return filectx(self._repo, self._path, fileid=fileid,
1070 filelog=self._filelog, changeid=changeid)
1071 filelog=self._filelog, changeid=changeid)
1071
1072
1072 def data(self):
1073 def data(self):
1073 try:
1074 try:
1074 return self._filelog.read(self._filenode)
1075 return self._filelog.read(self._filenode)
1075 except error.CensoredNodeError:
1076 except error.CensoredNodeError:
1076 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1077 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1077 return ""
1078 return ""
1078 raise error.Abort(_("censored node: %s") % short(self._filenode),
1079 raise error.Abort(_("censored node: %s") % short(self._filenode),
1079 hint=_("set censor.policy to ignore errors"))
1080 hint=_("set censor.policy to ignore errors"))
1080
1081
1081 def size(self):
1082 def size(self):
1082 return self._filelog.size(self._filerev)
1083 return self._filelog.size(self._filerev)
1083
1084
1084 def renamed(self):
1085 def renamed(self):
1085 """check if file was actually renamed in this changeset revision
1086 """check if file was actually renamed in this changeset revision
1086
1087
1087 If rename logged in file revision, we report copy for changeset only
1088 If rename logged in file revision, we report copy for changeset only
1088 if file revisions linkrev points back to the changeset in question
1089 if file revisions linkrev points back to the changeset in question
1089 or both changeset parents contain different file revisions.
1090 or both changeset parents contain different file revisions.
1090 """
1091 """
1091
1092
1092 renamed = self._filelog.renamed(self._filenode)
1093 renamed = self._filelog.renamed(self._filenode)
1093 if not renamed:
1094 if not renamed:
1094 return renamed
1095 return renamed
1095
1096
1096 if self.rev() == self.linkrev():
1097 if self.rev() == self.linkrev():
1097 return renamed
1098 return renamed
1098
1099
1099 name = self.path()
1100 name = self.path()
1100 fnode = self._filenode
1101 fnode = self._filenode
1101 for p in self._changectx.parents():
1102 for p in self._changectx.parents():
1102 try:
1103 try:
1103 if fnode == p.filenode(name):
1104 if fnode == p.filenode(name):
1104 return None
1105 return None
1105 except error.LookupError:
1106 except error.LookupError:
1106 pass
1107 pass
1107 return renamed
1108 return renamed
1108
1109
1109 def children(self):
1110 def children(self):
1110 # hard for renames
1111 # hard for renames
1111 c = self._filelog.children(self._filenode)
1112 c = self._filelog.children(self._filenode)
1112 return [filectx(self._repo, self._path, fileid=x,
1113 return [filectx(self._repo, self._path, fileid=x,
1113 filelog=self._filelog) for x in c]
1114 filelog=self._filelog) for x in c]
1114
1115
1115 class committablectx(basectx):
1116 class committablectx(basectx):
1116 """A committablectx object provides common functionality for a context that
1117 """A committablectx object provides common functionality for a context that
1117 wants the ability to commit, e.g. workingctx or memctx."""
1118 wants the ability to commit, e.g. workingctx or memctx."""
1118 def __init__(self, repo, text="", user=None, date=None, extra=None,
1119 def __init__(self, repo, text="", user=None, date=None, extra=None,
1119 changes=None):
1120 changes=None):
1120 self._repo = repo
1121 self._repo = repo
1121 self._rev = None
1122 self._rev = None
1122 self._node = None
1123 self._node = None
1123 self._text = text
1124 self._text = text
1124 if date:
1125 if date:
1125 self._date = util.parsedate(date)
1126 self._date = util.parsedate(date)
1126 if user:
1127 if user:
1127 self._user = user
1128 self._user = user
1128 if changes:
1129 if changes:
1129 self._status = changes
1130 self._status = changes
1130
1131
1131 self._extra = {}
1132 self._extra = {}
1132 if extra:
1133 if extra:
1133 self._extra = extra.copy()
1134 self._extra = extra.copy()
1134 if 'branch' not in self._extra:
1135 if 'branch' not in self._extra:
1135 try:
1136 try:
1136 branch = encoding.fromlocal(self._repo.dirstate.branch())
1137 branch = encoding.fromlocal(self._repo.dirstate.branch())
1137 except UnicodeDecodeError:
1138 except UnicodeDecodeError:
1138 raise error.Abort(_('branch name not in UTF-8!'))
1139 raise error.Abort(_('branch name not in UTF-8!'))
1139 self._extra['branch'] = branch
1140 self._extra['branch'] = branch
1140 if self._extra['branch'] == '':
1141 if self._extra['branch'] == '':
1141 self._extra['branch'] = 'default'
1142 self._extra['branch'] = 'default'
1142
1143
1143 def __str__(self):
1144 def __str__(self):
1144 return str(self._parents[0]) + "+"
1145 return str(self._parents[0]) + "+"
1145
1146
1146 def __nonzero__(self):
1147 def __nonzero__(self):
1147 return True
1148 return True
1148
1149
1149 def _buildflagfunc(self):
1150 def _buildflagfunc(self):
1150 # Create a fallback function for getting file flags when the
1151 # Create a fallback function for getting file flags when the
1151 # filesystem doesn't support them
1152 # filesystem doesn't support them
1152
1153
1153 copiesget = self._repo.dirstate.copies().get
1154 copiesget = self._repo.dirstate.copies().get
1154
1155
1155 if len(self._parents) < 2:
1156 if len(self._parents) < 2:
1156 # when we have one parent, it's easy: copy from parent
1157 # when we have one parent, it's easy: copy from parent
1157 man = self._parents[0].manifest()
1158 man = self._parents[0].manifest()
1158 def func(f):
1159 def func(f):
1159 f = copiesget(f, f)
1160 f = copiesget(f, f)
1160 return man.flags(f)
1161 return man.flags(f)
1161 else:
1162 else:
1162 # merges are tricky: we try to reconstruct the unstored
1163 # merges are tricky: we try to reconstruct the unstored
1163 # result from the merge (issue1802)
1164 # result from the merge (issue1802)
1164 p1, p2 = self._parents
1165 p1, p2 = self._parents
1165 pa = p1.ancestor(p2)
1166 pa = p1.ancestor(p2)
1166 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1167 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1167
1168
1168 def func(f):
1169 def func(f):
1169 f = copiesget(f, f) # may be wrong for merges with copies
1170 f = copiesget(f, f) # may be wrong for merges with copies
1170 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1171 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1171 if fl1 == fl2:
1172 if fl1 == fl2:
1172 return fl1
1173 return fl1
1173 if fl1 == fla:
1174 if fl1 == fla:
1174 return fl2
1175 return fl2
1175 if fl2 == fla:
1176 if fl2 == fla:
1176 return fl1
1177 return fl1
1177 return '' # punt for conflicts
1178 return '' # punt for conflicts
1178
1179
1179 return func
1180 return func
1180
1181
1181 @propertycache
1182 @propertycache
1182 def _flagfunc(self):
1183 def _flagfunc(self):
1183 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1184 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1184
1185
1185 @propertycache
1186 @propertycache
1186 def _manifest(self):
1187 def _manifest(self):
1187 """generate a manifest corresponding to the values in self._status
1188 """generate a manifest corresponding to the values in self._status
1188
1189
1189 This reuse the file nodeid from parent, but we append an extra letter
1190 This reuse the file nodeid from parent, but we append an extra letter
1190 when modified. Modified files get an extra 'm' while added files get
1191 when modified. Modified files get an extra 'm' while added files get
1191 an extra 'a'. This is used by manifests merge to see that files
1192 an extra 'a'. This is used by manifests merge to see that files
1192 are different and by update logic to avoid deleting newly added files.
1193 are different and by update logic to avoid deleting newly added files.
1193 """
1194 """
1194
1195
1195 man1 = self._parents[0].manifest()
1196 man1 = self._parents[0].manifest()
1196 man = man1.copy()
1197 man = man1.copy()
1197 if len(self._parents) > 1:
1198 if len(self._parents) > 1:
1198 man2 = self.p2().manifest()
1199 man2 = self.p2().manifest()
1199 def getman(f):
1200 def getman(f):
1200 if f in man1:
1201 if f in man1:
1201 return man1
1202 return man1
1202 return man2
1203 return man2
1203 else:
1204 else:
1204 getman = lambda f: man1
1205 getman = lambda f: man1
1205
1206
1206 copied = self._repo.dirstate.copies()
1207 copied = self._repo.dirstate.copies()
1207 ff = self._flagfunc
1208 ff = self._flagfunc
1208 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1209 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1209 for f in l:
1210 for f in l:
1210 orig = copied.get(f, f)
1211 orig = copied.get(f, f)
1211 man[f] = getman(orig).get(orig, nullid) + i
1212 man[f] = getman(orig).get(orig, nullid) + i
1212 try:
1213 try:
1213 man.setflag(f, ff(f))
1214 man.setflag(f, ff(f))
1214 except OSError:
1215 except OSError:
1215 pass
1216 pass
1216
1217
1217 for f in self._status.deleted + self._status.removed:
1218 for f in self._status.deleted + self._status.removed:
1218 if f in man:
1219 if f in man:
1219 del man[f]
1220 del man[f]
1220
1221
1221 return man
1222 return man
1222
1223
1223 @propertycache
1224 @propertycache
1224 def _status(self):
1225 def _status(self):
1225 return self._repo.status()
1226 return self._repo.status()
1226
1227
1227 @propertycache
1228 @propertycache
1228 def _user(self):
1229 def _user(self):
1229 return self._repo.ui.username()
1230 return self._repo.ui.username()
1230
1231
1231 @propertycache
1232 @propertycache
1232 def _date(self):
1233 def _date(self):
1233 return util.makedate()
1234 return util.makedate()
1234
1235
1235 def subrev(self, subpath):
1236 def subrev(self, subpath):
1236 return None
1237 return None
1237
1238
1238 def manifestnode(self):
1239 def manifestnode(self):
1239 return None
1240 return None
1240 def user(self):
1241 def user(self):
1241 return self._user or self._repo.ui.username()
1242 return self._user or self._repo.ui.username()
1242 def date(self):
1243 def date(self):
1243 return self._date
1244 return self._date
1244 def description(self):
1245 def description(self):
1245 return self._text
1246 return self._text
1246 def files(self):
1247 def files(self):
1247 return sorted(self._status.modified + self._status.added +
1248 return sorted(self._status.modified + self._status.added +
1248 self._status.removed)
1249 self._status.removed)
1249
1250
1250 def modified(self):
1251 def modified(self):
1251 return self._status.modified
1252 return self._status.modified
1252 def added(self):
1253 def added(self):
1253 return self._status.added
1254 return self._status.added
1254 def removed(self):
1255 def removed(self):
1255 return self._status.removed
1256 return self._status.removed
1256 def deleted(self):
1257 def deleted(self):
1257 return self._status.deleted
1258 return self._status.deleted
1258 def branch(self):
1259 def branch(self):
1259 return encoding.tolocal(self._extra['branch'])
1260 return encoding.tolocal(self._extra['branch'])
1260 def closesbranch(self):
1261 def closesbranch(self):
1261 return 'close' in self._extra
1262 return 'close' in self._extra
1262 def extra(self):
1263 def extra(self):
1263 return self._extra
1264 return self._extra
1264
1265
1265 def tags(self):
1266 def tags(self):
1266 return []
1267 return []
1267
1268
1268 def bookmarks(self):
1269 def bookmarks(self):
1269 b = []
1270 b = []
1270 for p in self.parents():
1271 for p in self.parents():
1271 b.extend(p.bookmarks())
1272 b.extend(p.bookmarks())
1272 return b
1273 return b
1273
1274
1274 def phase(self):
1275 def phase(self):
1275 phase = phases.draft # default phase to draft
1276 phase = phases.draft # default phase to draft
1276 for p in self.parents():
1277 for p in self.parents():
1277 phase = max(phase, p.phase())
1278 phase = max(phase, p.phase())
1278 return phase
1279 return phase
1279
1280
1280 def hidden(self):
1281 def hidden(self):
1281 return False
1282 return False
1282
1283
1283 def children(self):
1284 def children(self):
1284 return []
1285 return []
1285
1286
1286 def flags(self, path):
1287 def flags(self, path):
1287 if '_manifest' in self.__dict__:
1288 if '_manifest' in self.__dict__:
1288 try:
1289 try:
1289 return self._manifest.flags(path)
1290 return self._manifest.flags(path)
1290 except KeyError:
1291 except KeyError:
1291 return ''
1292 return ''
1292
1293
1293 try:
1294 try:
1294 return self._flagfunc(path)
1295 return self._flagfunc(path)
1295 except OSError:
1296 except OSError:
1296 return ''
1297 return ''
1297
1298
1298 def ancestor(self, c2):
1299 def ancestor(self, c2):
1299 """return the "best" ancestor context of self and c2"""
1300 """return the "best" ancestor context of self and c2"""
1300 return self._parents[0].ancestor(c2) # punt on two parents for now
1301 return self._parents[0].ancestor(c2) # punt on two parents for now
1301
1302
1302 def walk(self, match):
1303 def walk(self, match):
1303 '''Generates matching file names.'''
1304 '''Generates matching file names.'''
1304 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1305 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1305 True, False))
1306 True, False))
1306
1307
1307 def matches(self, match):
1308 def matches(self, match):
1308 return sorted(self._repo.dirstate.matches(match))
1309 return sorted(self._repo.dirstate.matches(match))
1309
1310
1310 def ancestors(self):
1311 def ancestors(self):
1311 for p in self._parents:
1312 for p in self._parents:
1312 yield p
1313 yield p
1313 for a in self._repo.changelog.ancestors(
1314 for a in self._repo.changelog.ancestors(
1314 [p.rev() for p in self._parents]):
1315 [p.rev() for p in self._parents]):
1315 yield changectx(self._repo, a)
1316 yield changectx(self._repo, a)
1316
1317
1317 def markcommitted(self, node):
1318 def markcommitted(self, node):
1318 """Perform post-commit cleanup necessary after committing this ctx
1319 """Perform post-commit cleanup necessary after committing this ctx
1319
1320
1320 Specifically, this updates backing stores this working context
1321 Specifically, this updates backing stores this working context
1321 wraps to reflect the fact that the changes reflected by this
1322 wraps to reflect the fact that the changes reflected by this
1322 workingctx have been committed. For example, it marks
1323 workingctx have been committed. For example, it marks
1323 modified and added files as normal in the dirstate.
1324 modified and added files as normal in the dirstate.
1324
1325
1325 """
1326 """
1326
1327
1327 self._repo.dirstate.beginparentchange()
1328 self._repo.dirstate.beginparentchange()
1328 for f in self.modified() + self.added():
1329 for f in self.modified() + self.added():
1329 self._repo.dirstate.normal(f)
1330 self._repo.dirstate.normal(f)
1330 for f in self.removed():
1331 for f in self.removed():
1331 self._repo.dirstate.drop(f)
1332 self._repo.dirstate.drop(f)
1332 self._repo.dirstate.setparents(node)
1333 self._repo.dirstate.setparents(node)
1333 self._repo.dirstate.endparentchange()
1334 self._repo.dirstate.endparentchange()
1334
1335
1335 # write changes out explicitly, because nesting wlock at
1336 # write changes out explicitly, because nesting wlock at
1336 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1337 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1337 # from immediately doing so for subsequent changing files
1338 # from immediately doing so for subsequent changing files
1338 self._repo.dirstate.write(self._repo.currenttransaction())
1339 self._repo.dirstate.write(self._repo.currenttransaction())
1339
1340
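def _example_summarize_pending(wctx):
    # Minimal sketch of how the committablectx accessors above are typically
    # consumed, assuming `wctx` is a committablectx such as repo[None].
    return {
        'branch': wctx.branch(),
        'user': wctx.user(),
        'files': wctx.files(),
        'dirty': bool(wctx.modified() or wctx.added() or wctx.removed()),
    }
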
1340 class workingctx(committablectx):
1341 class workingctx(committablectx):
1341 """A workingctx object makes access to data related to
1342 """A workingctx object makes access to data related to
1342 the current working directory convenient.
1343 the current working directory convenient.
1343 date - any valid date string or (unixtime, offset), or None.
1344 date - any valid date string or (unixtime, offset), or None.
1344 user - username string, or None.
1345 user - username string, or None.
1345 extra - a dictionary of extra values, or None.
1346 extra - a dictionary of extra values, or None.
1346 changes - a list of file lists as returned by localrepo.status()
1347 changes - a list of file lists as returned by localrepo.status()
1347 or None to use the repository status.
1348 or None to use the repository status.
1348 """
1349 """
1349 def __init__(self, repo, text="", user=None, date=None, extra=None,
1350 def __init__(self, repo, text="", user=None, date=None, extra=None,
1350 changes=None):
1351 changes=None):
1351 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1352 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1352
1353
1353 def __iter__(self):
1354 def __iter__(self):
1354 d = self._repo.dirstate
1355 d = self._repo.dirstate
1355 for f in d:
1356 for f in d:
1356 if d[f] != 'r':
1357 if d[f] != 'r':
1357 yield f
1358 yield f
1358
1359
1359 def __contains__(self, key):
1360 def __contains__(self, key):
1360 return self._repo.dirstate[key] not in "?r"
1361 return self._repo.dirstate[key] not in "?r"
1361
1362
1362 def hex(self):
1363 def hex(self):
1363 return hex(wdirid)
1364 return hex(wdirid)
1364
1365
1365 @propertycache
1366 @propertycache
1366 def _parents(self):
1367 def _parents(self):
1367 p = self._repo.dirstate.parents()
1368 p = self._repo.dirstate.parents()
1368 if p[1] == nullid:
1369 if p[1] == nullid:
1369 p = p[:-1]
1370 p = p[:-1]
1370 return [changectx(self._repo, x) for x in p]
1371 return [changectx(self._repo, x) for x in p]
1371
1372
1372 def filectx(self, path, filelog=None):
1373 def filectx(self, path, filelog=None):
1373 """get a file context from the working directory"""
1374 """get a file context from the working directory"""
1374 return workingfilectx(self._repo, path, workingctx=self,
1375 return workingfilectx(self._repo, path, workingctx=self,
1375 filelog=filelog)
1376 filelog=filelog)
1376
1377
1377 def dirty(self, missing=False, merge=True, branch=True):
1378 def dirty(self, missing=False, merge=True, branch=True):
1378 "check whether a working directory is modified"
1379 "check whether a working directory is modified"
1379 # check subrepos first
1380 # check subrepos first
1380 for s in sorted(self.substate):
1381 for s in sorted(self.substate):
1381 if self.sub(s).dirty():
1382 if self.sub(s).dirty():
1382 return True
1383 return True
1383 # check current working dir
1384 # check current working dir
1384 return ((merge and self.p2()) or
1385 return ((merge and self.p2()) or
1385 (branch and self.branch() != self.p1().branch()) or
1386 (branch and self.branch() != self.p1().branch()) or
1386 self.modified() or self.added() or self.removed() or
1387 self.modified() or self.added() or self.removed() or
1387 (missing and self.deleted()))
1388 (missing and self.deleted()))
1388
1389
1389 def add(self, list, prefix=""):
1390 def add(self, list, prefix=""):
1390 join = lambda f: os.path.join(prefix, f)
1391 join = lambda f: os.path.join(prefix, f)
1391 wlock = self._repo.wlock()
1392 wlock = self._repo.wlock()
1392 ui, ds = self._repo.ui, self._repo.dirstate
1393 ui, ds = self._repo.ui, self._repo.dirstate
1393 try:
1394 try:
1394 rejected = []
1395 rejected = []
1395 lstat = self._repo.wvfs.lstat
1396 lstat = self._repo.wvfs.lstat
1396 for f in list:
1397 for f in list:
1397 scmutil.checkportable(ui, join(f))
1398 scmutil.checkportable(ui, join(f))
1398 try:
1399 try:
1399 st = lstat(f)
1400 st = lstat(f)
1400 except OSError:
1401 except OSError:
1401 ui.warn(_("%s does not exist!\n") % join(f))
1402 ui.warn(_("%s does not exist!\n") % join(f))
1402 rejected.append(f)
1403 rejected.append(f)
1403 continue
1404 continue
1404 if st.st_size > 10000000:
1405 if st.st_size > 10000000:
1405 ui.warn(_("%s: up to %d MB of RAM may be required "
1406 ui.warn(_("%s: up to %d MB of RAM may be required "
1406 "to manage this file\n"
1407 "to manage this file\n"
1407 "(use 'hg revert %s' to cancel the "
1408 "(use 'hg revert %s' to cancel the "
1408 "pending addition)\n")
1409 "pending addition)\n")
1409 % (f, 3 * st.st_size // 1000000, join(f)))
1410 % (f, 3 * st.st_size // 1000000, join(f)))
1410 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1411 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1411 ui.warn(_("%s not added: only files and symlinks "
1412 ui.warn(_("%s not added: only files and symlinks "
1412 "supported currently\n") % join(f))
1413 "supported currently\n") % join(f))
1413 rejected.append(f)
1414 rejected.append(f)
1414 elif ds[f] in 'amn':
1415 elif ds[f] in 'amn':
1415 ui.warn(_("%s already tracked!\n") % join(f))
1416 ui.warn(_("%s already tracked!\n") % join(f))
1416 elif ds[f] == 'r':
1417 elif ds[f] == 'r':
1417 ds.normallookup(f)
1418 ds.normallookup(f)
1418 else:
1419 else:
1419 ds.add(f)
1420 ds.add(f)
1420 return rejected
1421 return rejected
1421 finally:
1422 finally:
1422 wlock.release()
1423 wlock.release()
1423
1424
1424 def forget(self, files, prefix=""):
1425 def forget(self, files, prefix=""):
1425 join = lambda f: os.path.join(prefix, f)
1426 join = lambda f: os.path.join(prefix, f)
1426 wlock = self._repo.wlock()
1427 wlock = self._repo.wlock()
1427 try:
1428 try:
1428 rejected = []
1429 rejected = []
1429 for f in files:
1430 for f in files:
1430 if f not in self._repo.dirstate:
1431 if f not in self._repo.dirstate:
1431 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1432 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1432 rejected.append(f)
1433 rejected.append(f)
1433 elif self._repo.dirstate[f] != 'a':
1434 elif self._repo.dirstate[f] != 'a':
1434 self._repo.dirstate.remove(f)
1435 self._repo.dirstate.remove(f)
1435 else:
1436 else:
1436 self._repo.dirstate.drop(f)
1437 self._repo.dirstate.drop(f)
1437 return rejected
1438 return rejected
1438 finally:
1439 finally:
1439 wlock.release()
1440 wlock.release()
1440
1441
1441 def undelete(self, list):
1442 def undelete(self, list):
1442 pctxs = self.parents()
1443 pctxs = self.parents()
1443 wlock = self._repo.wlock()
1444 wlock = self._repo.wlock()
1444 try:
1445 try:
1445 for f in list:
1446 for f in list:
1446 if self._repo.dirstate[f] != 'r':
1447 if self._repo.dirstate[f] != 'r':
1447 self._repo.ui.warn(_("%s not removed!\n") % f)
1448 self._repo.ui.warn(_("%s not removed!\n") % f)
1448 else:
1449 else:
1449 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1450 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1450 t = fctx.data()
1451 t = fctx.data()
1451 self._repo.wwrite(f, t, fctx.flags())
1452 self._repo.wwrite(f, t, fctx.flags())
1452 self._repo.dirstate.normal(f)
1453 self._repo.dirstate.normal(f)
1453 finally:
1454 finally:
1454 wlock.release()
1455 wlock.release()
1455
1456
1456 def copy(self, source, dest):
1457 def copy(self, source, dest):
1457 try:
1458 try:
1458 st = self._repo.wvfs.lstat(dest)
1459 st = self._repo.wvfs.lstat(dest)
1459 except OSError as err:
1460 except OSError as err:
1460 if err.errno != errno.ENOENT:
1461 if err.errno != errno.ENOENT:
1461 raise
1462 raise
1462 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1463 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1463 return
1464 return
1464 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1465 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1465 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1466 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1466 "symbolic link\n") % dest)
1467 "symbolic link\n") % dest)
1467 else:
1468 else:
1468 wlock = self._repo.wlock()
1469 wlock = self._repo.wlock()
1469 try:
1470 try:
1470 if self._repo.dirstate[dest] in '?':
1471 if self._repo.dirstate[dest] in '?':
1471 self._repo.dirstate.add(dest)
1472 self._repo.dirstate.add(dest)
1472 elif self._repo.dirstate[dest] in 'r':
1473 elif self._repo.dirstate[dest] in 'r':
1473 self._repo.dirstate.normallookup(dest)
1474 self._repo.dirstate.normallookup(dest)
1474 self._repo.dirstate.copy(source, dest)
1475 self._repo.dirstate.copy(source, dest)
1475 finally:
1476 finally:
1476 wlock.release()
1477 wlock.release()
1477
1478
1478 def match(self, pats=[], include=None, exclude=None, default='glob',
1479 def match(self, pats=[], include=None, exclude=None, default='glob',
1479 listsubrepos=False, badfn=None):
1480 listsubrepos=False, badfn=None):
1480 r = self._repo
1481 r = self._repo
1481
1482
1482 # Only a case insensitive filesystem needs magic to translate user input
1483 # Only a case insensitive filesystem needs magic to translate user input
1483 # to actual case in the filesystem.
1484 # to actual case in the filesystem.
1484 if not util.checkcase(r.root):
1485 if not util.checkcase(r.root):
1485 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1486 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1486 exclude, default, r.auditor, self,
1487 exclude, default, r.auditor, self,
1487 listsubrepos=listsubrepos,
1488 listsubrepos=listsubrepos,
1488 badfn=badfn)
1489 badfn=badfn)
1489 return matchmod.match(r.root, r.getcwd(), pats,
1490 return matchmod.match(r.root, r.getcwd(), pats,
1490 include, exclude, default,
1491 include, exclude, default,
1491 auditor=r.auditor, ctx=self,
1492 auditor=r.auditor, ctx=self,
1492 listsubrepos=listsubrepos, badfn=badfn)
1493 listsubrepos=listsubrepos, badfn=badfn)
1493
1494
1494 def _filtersuspectsymlink(self, files):
1495 def _filtersuspectsymlink(self, files):
1495 if not files or self._repo.dirstate._checklink:
1496 if not files or self._repo.dirstate._checklink:
1496 return files
1497 return files
1497
1498
1498 # Symlink placeholders may get non-symlink-like contents
1499 # Symlink placeholders may get non-symlink-like contents
1499 # via user error or dereferencing by NFS or Samba servers,
1500 # via user error or dereferencing by NFS or Samba servers,
1500 # so we filter out any placeholders that don't look like a
1501 # so we filter out any placeholders that don't look like a
1501 # symlink
1502 # symlink
1502 sane = []
1503 sane = []
1503 for f in files:
1504 for f in files:
1504 if self.flags(f) == 'l':
1505 if self.flags(f) == 'l':
1505 d = self[f].data()
1506 d = self[f].data()
1506 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1507 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1507 self._repo.ui.debug('ignoring suspect symlink placeholder'
1508 self._repo.ui.debug('ignoring suspect symlink placeholder'
1508 ' "%s"\n' % f)
1509 ' "%s"\n' % f)
1509 continue
1510 continue
1510 sane.append(f)
1511 sane.append(f)
1511 return sane
1512 return sane
1512
1513
1513 def _checklookup(self, files):
1514 def _checklookup(self, files):
1514 # check for any possibly clean files
1515 # check for any possibly clean files
1515 if not files:
1516 if not files:
1516 return [], []
1517 return [], []
1517
1518
1518 modified = []
1519 modified = []
1519 fixup = []
1520 fixup = []
1520 pctx = self._parents[0]
1521 pctx = self._parents[0]
1521 # do a full compare of any files that might have changed
1522 # do a full compare of any files that might have changed
1522 for f in sorted(files):
1523 for f in sorted(files):
1523 if (f not in pctx or self.flags(f) != pctx.flags(f)
1524 if (f not in pctx or self.flags(f) != pctx.flags(f)
1524 or pctx[f].cmp(self[f])):
1525 or pctx[f].cmp(self[f])):
1525 modified.append(f)
1526 modified.append(f)
1526 else:
1527 else:
1527 fixup.append(f)
1528 fixup.append(f)
1528
1529
1529 # update dirstate for files that are actually clean
1530 # update dirstate for files that are actually clean
1530 if fixup:
1531 if fixup:
1531 try:
1532 try:
1532 # updating the dirstate is optional
1533 # updating the dirstate is optional
1533 # so we don't wait on the lock
1534 # so we don't wait on the lock
1534 # wlock can invalidate the dirstate, so cache normal _after_
1535 # wlock can invalidate the dirstate, so cache normal _after_
1535 # taking the lock
1536 # taking the lock
1536 wlock = self._repo.wlock(False)
1537 wlock = self._repo.wlock(False)
1537 normal = self._repo.dirstate.normal
1538 normal = self._repo.dirstate.normal
1538 try:
1539 try:
1539 for f in fixup:
1540 for f in fixup:
1540 normal(f)
1541 normal(f)
1541 # write changes out explicitly, because nesting
1542 # write changes out explicitly, because nesting
1542 # wlock at runtime may prevent 'wlock.release()'
1543 # wlock at runtime may prevent 'wlock.release()'
1543 # below from doing so for subsequent changing files
1544 # below from doing so for subsequent changing files
1544 self._repo.dirstate.write(self._repo.currenttransaction())
1545 self._repo.dirstate.write(self._repo.currenttransaction())
1545 finally:
1546 finally:
1546 wlock.release()
1547 wlock.release()
1547 except error.LockError:
1548 except error.LockError:
1548 pass
1549 pass
1549 return modified, fixup
1550 return modified, fixup
1550
1551
1551 def _manifestmatches(self, match, s):
1552 def _manifestmatches(self, match, s):
1552 """Slow path for workingctx
1553 """Slow path for workingctx
1553
1554
1554 The fast path is when we compare the working directory to its parent
1555 The fast path is when we compare the working directory to its parent
1555 which means this function is comparing with a non-parent; therefore we
1556 which means this function is comparing with a non-parent; therefore we
1556 need to build a manifest and return what matches.
1557 need to build a manifest and return what matches.
1557 """
1558 """
1558 mf = self._repo['.']._manifestmatches(match, s)
1559 mf = self._repo['.']._manifestmatches(match, s)
1559 for f in s.modified + s.added:
1560 for f in s.modified + s.added:
1560 mf[f] = _newnode
1561 mf[f] = _newnode
1561 mf.setflag(f, self.flags(f))
1562 mf.setflag(f, self.flags(f))
1562 for f in s.removed:
1563 for f in s.removed:
1563 if f in mf:
1564 if f in mf:
1564 del mf[f]
1565 del mf[f]
1565 return mf
1566 return mf
1566
1567
1567 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1568 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1568 unknown=False):
1569 unknown=False):
1569 '''Gets the status from the dirstate -- internal use only.'''
1570 '''Gets the status from the dirstate -- internal use only.'''
1570 listignored, listclean, listunknown = ignored, clean, unknown
1571 listignored, listclean, listunknown = ignored, clean, unknown
1571 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1572 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1572 subrepos = []
1573 subrepos = []
1573 if '.hgsub' in self:
1574 if '.hgsub' in self:
1574 subrepos = sorted(self.substate)
1575 subrepos = sorted(self.substate)
1575 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1576 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1576 listclean, listunknown)
1577 listclean, listunknown)
1577
1578
1578 # check for any possibly clean files
1579 # check for any possibly clean files
1579 if cmp:
1580 if cmp:
1580 modified2, fixup = self._checklookup(cmp)
1581 modified2, fixup = self._checklookup(cmp)
1581 s.modified.extend(modified2)
1582 s.modified.extend(modified2)
1582
1583
1583 # update dirstate for files that are actually clean
1584 # update dirstate for files that are actually clean
1584 if fixup and listclean:
1585 if fixup and listclean:
1585 s.clean.extend(fixup)
1586 s.clean.extend(fixup)
1586
1587
1587 if match.always():
1588 if match.always():
1588 # cache for performance
1589 # cache for performance
1589 if s.unknown or s.ignored or s.clean:
1590 if s.unknown or s.ignored or s.clean:
1590 # "_status" is cached with list*=False in the normal route
1591 # "_status" is cached with list*=False in the normal route
1591 self._status = scmutil.status(s.modified, s.added, s.removed,
1592 self._status = scmutil.status(s.modified, s.added, s.removed,
1592 s.deleted, [], [], [])
1593 s.deleted, [], [], [])
1593 else:
1594 else:
1594 self._status = s
1595 self._status = s
1595
1596
1596 return s
1597 return s
1597
1598
1598 def _buildstatus(self, other, s, match, listignored, listclean,
1599 def _buildstatus(self, other, s, match, listignored, listclean,
1599 listunknown):
1600 listunknown):
1600 """build a status with respect to another context
1601 """build a status with respect to another context
1601
1602
1602 This includes logic for maintaining the fast path of status when
1603 This includes logic for maintaining the fast path of status when
1603 comparing the working directory against its parent, which is to skip
1604 comparing the working directory against its parent, which is to skip
1604 building a new manifest if self (working directory) is not comparing
1605 building a new manifest if self (working directory) is not comparing
1605 against its parent (repo['.']).
1606 against its parent (repo['.']).
1606 """
1607 """
1607 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1608 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1608 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1609 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1609 # might have accidentally ended up with the entire contents of the file
1610 # might have accidentally ended up with the entire contents of the file
1610 # they are supposed to be linking to.
1611 # they are supposed to be linking to.
1611 s.modified[:] = self._filtersuspectsymlink(s.modified)
1612 s.modified[:] = self._filtersuspectsymlink(s.modified)
1612 if other != self._repo['.']:
1613 if other != self._repo['.']:
1613 s = super(workingctx, self)._buildstatus(other, s, match,
1614 s = super(workingctx, self)._buildstatus(other, s, match,
1614 listignored, listclean,
1615 listignored, listclean,
1615 listunknown)
1616 listunknown)
1616 return s
1617 return s
1617
1618
1618 def _matchstatus(self, other, match):
1619 def _matchstatus(self, other, match):
1619 """override the match method with a filter for directory patterns
1620 """override the match method with a filter for directory patterns
1620
1621
1621 We use inheritance to customize the match.bad method only in cases of
1622 We use inheritance to customize the match.bad method only in cases of
1622 workingctx since it belongs only to the working directory when
1623 workingctx since it belongs only to the working directory when
1623 comparing against the parent changeset.
1624 comparing against the parent changeset.
1624
1625
1625 If we aren't comparing against the working directory's parent, then we
1626 If we aren't comparing against the working directory's parent, then we
1626 just use the default match object sent to us.
1627 just use the default match object sent to us.
1627 """
1628 """
1628 superself = super(workingctx, self)
1629 superself = super(workingctx, self)
1629 match = superself._matchstatus(other, match)
1630 match = superself._matchstatus(other, match)
1630 if other != self._repo['.']:
1631 if other != self._repo['.']:
1631 def bad(f, msg):
1632 def bad(f, msg):
1632 # 'f' may be a directory pattern from 'match.files()',
1633 # 'f' may be a directory pattern from 'match.files()',
1633 # so 'f not in ctx1' is not enough
1634 # so 'f not in ctx1' is not enough
1634 if f not in other and not other.hasdir(f):
1635 if f not in other and not other.hasdir(f):
1635 self._repo.ui.warn('%s: %s\n' %
1636 self._repo.ui.warn('%s: %s\n' %
1636 (self._repo.dirstate.pathto(f), msg))
1637 (self._repo.dirstate.pathto(f), msg))
1637 match.bad = bad
1638 match.bad = bad
1638 return match
1639 return match
1639
1640
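def _example_track_files(repo, paths):
    # Minimal sketch of driving workingctx.add(), assuming `repo` is a
    # localrepo and `paths` are repo-root-relative file names. This mirrors,
    # at a lower level, what 'hg add' does: rejected names are reported back
    # instead of raising.
    wctx = repo[None]
    rejected = wctx.add(paths)
    return [p for p in paths if p not in rejected]
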
1640 class committablefilectx(basefilectx):
1641 class committablefilectx(basefilectx):
1641 """A committablefilectx provides common functionality for a file context
1642 """A committablefilectx provides common functionality for a file context
1642 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1643 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1643 def __init__(self, repo, path, filelog=None, ctx=None):
1644 def __init__(self, repo, path, filelog=None, ctx=None):
1644 self._repo = repo
1645 self._repo = repo
1645 self._path = path
1646 self._path = path
1646 self._changeid = None
1647 self._changeid = None
1647 self._filerev = self._filenode = None
1648 self._filerev = self._filenode = None
1648
1649
1649 if filelog is not None:
1650 if filelog is not None:
1650 self._filelog = filelog
1651 self._filelog = filelog
1651 if ctx:
1652 if ctx:
1652 self._changectx = ctx
1653 self._changectx = ctx
1653
1654
1654 def __nonzero__(self):
1655 def __nonzero__(self):
1655 return True
1656 return True
1656
1657
1657 def linkrev(self):
1658 def linkrev(self):
1658 # linked to self._changectx no matter if file is modified or not
1659 # linked to self._changectx no matter if file is modified or not
1659 return self.rev()
1660 return self.rev()
1660
1661
1661 def parents(self):
1662 def parents(self):
1662 '''return parent filectxs, following copies if necessary'''
1663 '''return parent filectxs, following copies if necessary'''
1663 def filenode(ctx, path):
1664 def filenode(ctx, path):
1664 return ctx._manifest.get(path, nullid)
1665 return ctx._manifest.get(path, nullid)
1665
1666
1666 path = self._path
1667 path = self._path
1667 fl = self._filelog
1668 fl = self._filelog
1668 pcl = self._changectx._parents
1669 pcl = self._changectx._parents
1669 renamed = self.renamed()
1670 renamed = self.renamed()
1670
1671
1671 if renamed:
1672 if renamed:
1672 pl = [renamed + (None,)]
1673 pl = [renamed + (None,)]
1673 else:
1674 else:
1674 pl = [(path, filenode(pcl[0], path), fl)]
1675 pl = [(path, filenode(pcl[0], path), fl)]
1675
1676
1676 for pc in pcl[1:]:
1677 for pc in pcl[1:]:
1677 pl.append((path, filenode(pc, path), fl))
1678 pl.append((path, filenode(pc, path), fl))
1678
1679
1679 return [self._parentfilectx(p, fileid=n, filelog=l)
1680 return [self._parentfilectx(p, fileid=n, filelog=l)
1680 for p, n, l in pl if n != nullid]
1681 for p, n, l in pl if n != nullid]
1681
1682
1682 def children(self):
1683 def children(self):
1683 return []
1684 return []
1684
1685
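def _example_copy_source(wfctx):
    # Minimal sketch, assuming `wfctx` is a committablefilectx (for example
    # obtained via repo[None][path]). parents() follows a pending copy or
    # rename recorded in the dirstate, so the first parent may live at a
    # different path.
    return [(p.path(), short(p.filenode())) for p in wfctx.parents()]
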
1685 class workingfilectx(committablefilectx):
1686 class workingfilectx(committablefilectx):
1686 """A workingfilectx object makes access to data related to a particular
1687 """A workingfilectx object makes access to data related to a particular
1687 file in the working directory convenient."""
1688 file in the working directory convenient."""
1688 def __init__(self, repo, path, filelog=None, workingctx=None):
1689 def __init__(self, repo, path, filelog=None, workingctx=None):
1689 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1690 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1690
1691
1691 @propertycache
1692 @propertycache
1692 def _changectx(self):
1693 def _changectx(self):
1693 return workingctx(self._repo)
1694 return workingctx(self._repo)
1694
1695
1695 def data(self):
1696 def data(self):
1696 return self._repo.wread(self._path)
1697 return self._repo.wread(self._path)
1697 def renamed(self):
1698 def renamed(self):
1698 rp = self._repo.dirstate.copied(self._path)
1699 rp = self._repo.dirstate.copied(self._path)
1699 if not rp:
1700 if not rp:
1700 return None
1701 return None
1701 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1702 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1702
1703
1703 def size(self):
1704 def size(self):
1704 return self._repo.wvfs.lstat(self._path).st_size
1705 return self._repo.wvfs.lstat(self._path).st_size
1705 def date(self):
1706 def date(self):
1706 t, tz = self._changectx.date()
1707 t, tz = self._changectx.date()
1707 try:
1708 try:
1708 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1709 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1709 except OSError as err:
1710 except OSError as err:
1710 if err.errno != errno.ENOENT:
1711 if err.errno != errno.ENOENT:
1711 raise
1712 raise
1712 return (t, tz)
1713 return (t, tz)
1713
1714
1714 def cmp(self, fctx):
1715 def cmp(self, fctx):
1715 """compare with other file context
1716 """compare with other file context
1716
1717
1717 returns True if different than fctx.
1718 returns True if different than fctx.
1718 """
1719 """
1719 # fctx should be a filectx (not a workingfilectx)
1720 # fctx should be a filectx (not a workingfilectx)
1720 # invert comparison to reuse the same code path
1721 # invert comparison to reuse the same code path
1721 return fctx.cmp(self)
1722 return fctx.cmp(self)
1722
1723
1723 def remove(self, ignoremissing=False):
1724 def remove(self, ignoremissing=False):
1724 """wraps unlink for a repo's working directory"""
1725 """wraps unlink for a repo's working directory"""
1725 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1726 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1726
1727
1727 def write(self, data, flags):
1728 def write(self, data, flags):
1728 """wraps repo.wwrite"""
1729 """wraps repo.wwrite"""
1729 self._repo.wwrite(self._path, data, flags)
1730 self._repo.wwrite(self._path, data, flags)
1730
1731
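def _example_revert_file(wctx, path):
    # Minimal sketch using the workingfilectx helpers above, assuming `wctx`
    # is repo[None] and `path` exists in the first parent. If the working
    # copy differs from p1, rewrite it from the parent's data and flags.
    wfctx = wctx[path]
    pfctx = wctx.p1()[path]
    if wfctx.cmp(pfctx):
        wfctx.write(pfctx.data(), pfctx.flags())
        return True
    return False
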
1731 class workingcommitctx(workingctx):
1732 class workingcommitctx(workingctx):
1732 """A workingcommitctx object makes access to data related to
1733 """A workingcommitctx object makes access to data related to
1733 the revision being committed convenient.
1734 the revision being committed convenient.
1734
1735
1735 This hides changes in the working directory, if they aren't
1736 This hides changes in the working directory, if they aren't
1736 committed in this context.
1737 committed in this context.
1737 """
1738 """
1738 def __init__(self, repo, changes,
1739 def __init__(self, repo, changes,
1739 text="", user=None, date=None, extra=None):
1740 text="", user=None, date=None, extra=None):
1740 super(workingctx, self).__init__(repo, text, user, date, extra,
1741 super(workingctx, self).__init__(repo, text, user, date, extra,
1741 changes)
1742 changes)
1742
1743
1743 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1744 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1744 unknown=False):
1745 unknown=False):
1745 """Return matched files only in ``self._status``
1746 """Return matched files only in ``self._status``
1746
1747
1747 Uncommitted files appear "clean" via this context, even if
1748 Uncommitted files appear "clean" via this context, even if
1748 they aren't actually so in the working directory.
1749 they aren't actually so in the working directory.
1749 """
1750 """
1750 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1751 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1751 if clean:
1752 if clean:
1752 clean = [f for f in self._manifest if f not in self._changedset]
1753 clean = [f for f in self._manifest if f not in self._changedset]
1753 else:
1754 else:
1754 clean = []
1755 clean = []
1755 return scmutil.status([f for f in self._status.modified if match(f)],
1756 return scmutil.status([f for f in self._status.modified if match(f)],
1756 [f for f in self._status.added if match(f)],
1757 [f for f in self._status.added if match(f)],
1757 [f for f in self._status.removed if match(f)],
1758 [f for f in self._status.removed if match(f)],
1758 [], [], [], clean)
1759 [], [], [], clean)
1759
1760
1760 @propertycache
1761 @propertycache
1761 def _changedset(self):
1762 def _changedset(self):
1762 """Return the set of files changed in this context
1763 """Return the set of files changed in this context
1763 """
1764 """
1764 changed = set(self._status.modified)
1765 changed = set(self._status.modified)
1765 changed.update(self._status.added)
1766 changed.update(self._status.added)
1766 changed.update(self._status.removed)
1767 changed.update(self._status.removed)
1767 return changed
1768 return changed
1768
1769
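def _example_partial_commit_view(repo, changes):
    # Minimal sketch, assuming `changes` is a scmutil.status tuple listing
    # only the files taking part in a (partial) commit. workingcommitctx
    # narrows status to those files, so unrelated working-directory edits
    # stay hidden from the commit machinery.
    wcctx = workingcommitctx(repo, changes, text='partial commit example')
    return wcctx.files()
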
1769 class memctx(committablectx):
1770 class memctx(committablectx):
1770 """Use memctx to perform in-memory commits via localrepo.commitctx().
1771 """Use memctx to perform in-memory commits via localrepo.commitctx().
1771
1772
1772 Revision information is supplied at initialization time while
1773 Revision information is supplied at initialization time while
1773 related files data and is made available through a callback
1774 related files data and is made available through a callback
1774 mechanism. 'repo' is the current localrepo, 'parents' is a
1775 mechanism. 'repo' is the current localrepo, 'parents' is a
1775 sequence of two parent revisions identifiers (pass None for every
1776 sequence of two parent revisions identifiers (pass None for every
1776 missing parent), 'text' is the commit message and 'files' lists
1777 missing parent), 'text' is the commit message and 'files' lists
1777 names of files touched by the revision (normalized and relative to
1778 names of files touched by the revision (normalized and relative to
1778 repository root).
1779 repository root).
1779
1780
1780 filectxfn(repo, memctx, path) is a callable receiving the
1781 filectxfn(repo, memctx, path) is a callable receiving the
1781 repository, the current memctx object and the normalized path of
1782 repository, the current memctx object and the normalized path of
1782 requested file, relative to repository root. It is fired by the
1783 requested file, relative to repository root. It is fired by the
1783 commit function for every file in 'files', but calls order is
1784 commit function for every file in 'files', but calls order is
1784 undefined. If the file is available in the revision being
1785 undefined. If the file is available in the revision being
1785 committed (updated or added), filectxfn returns a memfilectx
1786 committed (updated or added), filectxfn returns a memfilectx
1786 object. If the file was removed, filectxfn raises an
1787 object. If the file was removed, filectxfn raises an
1787 IOError. Moved files are represented by marking the source file
1788 IOError. Moved files are represented by marking the source file
1788 removed and the new file added with copy information (see
1789 removed and the new file added with copy information (see
1789 memfilectx).
1790 memfilectx).
1790
1791
1791 user receives the committer name and defaults to current
1792 user receives the committer name and defaults to current
1792 repository username, date is the commit date in any format
1793 repository username, date is the commit date in any format
1793 supported by util.parsedate() and defaults to current date, extra
1794 supported by util.parsedate() and defaults to current date, extra
1794 is a dictionary of metadata or is left empty.
1795 is a dictionary of metadata or is left empty.
1795 """
1796 """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True
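
    # Hedged sketch, not in the original source: one way an extension might
    # write a filectxfn that behaves correctly on either side of this change.
    # 'removed' and 'contents' below are assumed caller-provided collections.
    #
    #   def filectxfn(repo, memctx, path):
    #       if path in removed:
    #           if getattr(memctx, '_returnnoneformissingfiles', False):
    #               return None                        # Mercurial >= 3.2
    #           raise IOError(errno.ENOENT, '%s is missing' % path)  # <= 3.1
    #       return memfilectx(repo, path, contents[path], memctx=memctx)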

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx
        else:
            # "util.cachefunc" reduces invocation of possibly expensive
            # "filectxfn" for performance (e.g. converting from another VCS)
            self._filectxfn = util.cachefunc(filectxfn)

        if extra:
            self._extra = extra.copy()
        else:
            self._extra = {}

        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].node()
                if len(p) > 1:
                    p2node = p[1].node()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized with a two-element list.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if the current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)
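
    # Hedged sketch, not part of the upstream file: representing a rename of
    # 'old.txt' to 'new.txt' via memctx, as described in the memctx docstring
    # (source marked removed, destination added with copy information). The
    # file names and the 'data' variable are assumptions for illustration.
    #
    #   def getfilectx(repo, memctx, path):
    #       if path == 'old.txt':
    #           return None      # treat the rename source as removed
    #       return memfilectx(repo, path, data, copied='old.txt',
    #                         memctx=memctx)
    #
    #   ctx = memctx(repo, (repo['.'].node(), None), 'rename old.txt',
    #                ['old.txt', 'new.txt'], getfilectx)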

    def data(self):
        return self._data
    def size(self):
        return len(self.data())
    def flags(self):
        return self._flags
    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data