status: don't crash if a lookup file disappears...
Siddharth Agarwal
r32651:c850f0ed 4.2.1 stable
@@ -1,2174 +1,2187 @@
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import re
import stat

from .i18n import _
from .node import (
    addednodeid,
    bin,
    hex,
    modifiednodeid,
    nullid,
    nullrev,
    short,
    wdirid,
    wdirnodes,
)
from . import (
    encoding,
    error,
    fileset,
    match as matchmod,
    mdiff,
    obsolete as obsmod,
    patch,
    phases,
    pycompat,
    repoview,
    revlog,
    scmutil,
    subrepo,
    util,
)

propertycache = util.propertycache

nonascii = re.compile(r'[^\x21-\x7f]').search

class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        r = short(self.node())
        if pycompat.ispy3:
            return r.decode('ascii')
        return r

    def __bytes__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changesets.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        if pats is None:
            pats = []
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def dirty(self, missing=False, merge=True, branch=True):
        return False

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r


def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None, extra=None):
    def getfilectx(repo, memctx, path):
        data, mode, copied = store.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)
    if extra is None:
        extra = {}
    if branch:
        extra['branch'] = encoding.fromlocal(branch)
    ctx = memctx(repo, parents, text, files, getfilectx, user,
                 date, extra, editor)
    return ctx

def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith('visible'):
        msg = _("hidden revision '%s'") % changeid
        hint = _('use --hidden to access hidden revisions')
        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)

class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if not pycompat.ispy3 and isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @propertycache
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        return self._changeset.extra
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)

class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory."""
    def __new__(cls, repo, path, *args, **kwargs):
        return super(basefilectx, cls).__new__(cls)

    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if '_changeid' in self.__dict__:
            return self._changeid
        elif '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __str__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    def flags(self):
        return self._changectx.flags(self._path)
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def repo(self):
        return self._repo

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return util.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

839 def _adjustlinkrev(self, srcrev, inclusive=False):
839 def _adjustlinkrev(self, srcrev, inclusive=False):
840 """return the first ancestor of <srcrev> introducing <fnode>
840 """return the first ancestor of <srcrev> introducing <fnode>
841
841
842 If the linkrev of the file revision does not point to an ancestor of
842 If the linkrev of the file revision does not point to an ancestor of
843 srcrev, we'll walk down the ancestors until we find one introducing
843 srcrev, we'll walk down the ancestors until we find one introducing
844 this file revision.
844 this file revision.
845
845
846 :srcrev: the changeset revision we search ancestors from
846 :srcrev: the changeset revision we search ancestors from
847 :inclusive: if true, the src revision will also be checked
847 :inclusive: if true, the src revision will also be checked
848 """
848 """
849 repo = self._repo
849 repo = self._repo
850 cl = repo.unfiltered().changelog
850 cl = repo.unfiltered().changelog
851 mfl = repo.manifestlog
851 mfl = repo.manifestlog
852 # fetch the linkrev
852 # fetch the linkrev
853 lkr = self.linkrev()
853 lkr = self.linkrev()
854 # hack to reuse ancestor computation when searching for renames
854 # hack to reuse ancestor computation when searching for renames
855 memberanc = getattr(self, '_ancestrycontext', None)
855 memberanc = getattr(self, '_ancestrycontext', None)
856 iteranc = None
856 iteranc = None
857 if srcrev is None:
857 if srcrev is None:
858 # wctx case, used by workingfilectx during mergecopy
858 # wctx case, used by workingfilectx during mergecopy
859 revs = [p.rev() for p in self._repo[None].parents()]
859 revs = [p.rev() for p in self._repo[None].parents()]
860 inclusive = True # we skipped the real (revless) source
860 inclusive = True # we skipped the real (revless) source
861 else:
861 else:
862 revs = [srcrev]
862 revs = [srcrev]
863 if memberanc is None:
863 if memberanc is None:
864 memberanc = iteranc = cl.ancestors(revs, lkr,
864 memberanc = iteranc = cl.ancestors(revs, lkr,
865 inclusive=inclusive)
865 inclusive=inclusive)
866 # check if this linkrev is an ancestor of srcrev
866 # check if this linkrev is an ancestor of srcrev
867 if lkr not in memberanc:
867 if lkr not in memberanc:
868 if iteranc is None:
868 if iteranc is None:
869 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
869 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
870 fnode = self._filenode
870 fnode = self._filenode
871 path = self._path
871 path = self._path
872 for a in iteranc:
872 for a in iteranc:
873 ac = cl.read(a) # get changeset data (we avoid object creation)
873 ac = cl.read(a) # get changeset data (we avoid object creation)
874 if path in ac[3]: # checking the 'files' field.
874 if path in ac[3]: # checking the 'files' field.
875 # The file has been touched, check if the content is
875 # The file has been touched, check if the content is
876 # similar to the one we search for.
876 # similar to the one we search for.
877 if fnode == mfl[ac[0]].readfast().get(path):
877 if fnode == mfl[ac[0]].readfast().get(path):
878 return a
878 return a
879 # In theory, we should never get out of that loop without a result.
879 # In theory, we should never get out of that loop without a result.
880 # But if manifest uses a buggy file revision (not children of the
880 # But if manifest uses a buggy file revision (not children of the
881 # one it replaces) we could. Such a buggy situation will likely
881 # one it replaces) we could. Such a buggy situation will likely
882 # result is crash somewhere else at to some point.
882 # result is crash somewhere else at to some point.
883 return lkr
883 return lkr
884
884
885 def introrev(self):
885 def introrev(self):
886 """return the rev of the changeset which introduced this file revision
886 """return the rev of the changeset which introduced this file revision
887
887
888 This method is different from linkrev because it take into account the
888 This method is different from linkrev because it take into account the
889 changeset the filectx was created from. It ensures the returned
889 changeset the filectx was created from. It ensures the returned
890 revision is one of its ancestors. This prevents bugs from
890 revision is one of its ancestors. This prevents bugs from
891 'linkrev-shadowing' when a file revision is used by multiple
891 'linkrev-shadowing' when a file revision is used by multiple
892 changesets.
892 changesets.
893 """
893 """
894 lkr = self.linkrev()
894 lkr = self.linkrev()
895 attrs = vars(self)
895 attrs = vars(self)
896 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
896 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
897 if noctx or self.rev() == lkr:
897 if noctx or self.rev() == lkr:
898 return self.linkrev()
898 return self.linkrev()
899 return self._adjustlinkrev(self.rev(), inclusive=True)
899 return self._adjustlinkrev(self.rev(), inclusive=True)
900
900
901 def _parentfilectx(self, path, fileid, filelog):
901 def _parentfilectx(self, path, fileid, filelog):
902 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
902 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
903 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
903 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
904 if '_changeid' in vars(self) or '_changectx' in vars(self):
904 if '_changeid' in vars(self) or '_changectx' in vars(self):
905 # If self is associated with a changeset (probably explicitly
905 # If self is associated with a changeset (probably explicitly
906 # fed), ensure the created filectx is associated with a
906 # fed), ensure the created filectx is associated with a
907 # changeset that is an ancestor of self.changectx.
907 # changeset that is an ancestor of self.changectx.
908 # This lets us later use _adjustlinkrev to get a correct link.
908 # This lets us later use _adjustlinkrev to get a correct link.
909 fctx._descendantrev = self.rev()
909 fctx._descendantrev = self.rev()
910 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
910 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
911 elif '_descendantrev' in vars(self):
911 elif '_descendantrev' in vars(self):
912 # Otherwise propagate _descendantrev if we have one associated.
912 # Otherwise propagate _descendantrev if we have one associated.
913 fctx._descendantrev = self._descendantrev
913 fctx._descendantrev = self._descendantrev
914 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
914 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
915 return fctx
915 return fctx
916
916
917 def parents(self):
917 def parents(self):
918 _path = self._path
918 _path = self._path
919 fl = self._filelog
919 fl = self._filelog
920 parents = self._filelog.parents(self._filenode)
920 parents = self._filelog.parents(self._filenode)
921 pl = [(_path, node, fl) for node in parents if node != nullid]
921 pl = [(_path, node, fl) for node in parents if node != nullid]
922
922
923 r = fl.renamed(self._filenode)
923 r = fl.renamed(self._filenode)
924 if r:
924 if r:
925 # - In the simple rename case, both parents are nullid and pl is empty.
925 # - In the simple rename case, both parents are nullid and pl is empty.
926 # - In case of merge, only one of the parents is nullid and should
926 # - In case of merge, only one of the parents is nullid and should
927 # be replaced with the rename information. This parent is -always-
927 # be replaced with the rename information. This parent is -always-
928 # the first one.
928 # the first one.
929 #
929 #
930 # As nullid parents have always been filtered out by the previous list
930 # As nullid parents have always been filtered out by the previous list
931 # comprehension, inserting at index 0 always amounts to "replacing the
931 # comprehension, inserting at index 0 always amounts to "replacing the
932 # first nullid parent with the rename information".
932 # first nullid parent with the rename information".
933 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
933 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
934
934
935 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
935 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
936
936
937 def p1(self):
937 def p1(self):
938 return self.parents()[0]
938 return self.parents()[0]
939
939
940 def p2(self):
940 def p2(self):
941 p = self.parents()
941 p = self.parents()
942 if len(p) == 2:
942 if len(p) == 2:
943 return p[1]
943 return p[1]
944 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
944 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
945
945
946 def annotate(self, follow=False, linenumber=False, diffopts=None):
946 def annotate(self, follow=False, linenumber=False, diffopts=None):
947 '''returns a list of tuples of ((ctx, number), line) for each line
947 '''returns a list of tuples of ((ctx, number), line) for each line
948 in the file, where ctx is the filectx of the node where
948 in the file, where ctx is the filectx of the node where
949 that line was last changed; if the linenumber parameter is true, number is
949 that line was last changed; if the linenumber parameter is true, number is
950 the line number at its first appearance in the managed file; otherwise,
950 the line number at its first appearance in the managed file; otherwise,
951 number has a fixed value of False.
951 number has a fixed value of False.
952 '''
952 '''
953
953
954 def lines(text):
954 def lines(text):
955 if text.endswith("\n"):
955 if text.endswith("\n"):
956 return text.count("\n")
956 return text.count("\n")
957 return text.count("\n") + int(bool(text))
957 return text.count("\n") + int(bool(text))
958
958
959 if linenumber:
959 if linenumber:
960 def decorate(text, rev):
960 def decorate(text, rev):
961 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
961 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
962 else:
962 else:
963 def decorate(text, rev):
963 def decorate(text, rev):
964 return ([(rev, False)] * lines(text), text)
964 return ([(rev, False)] * lines(text), text)
965
965
966 def pair(parent, child):
966 def pair(parent, child):
967 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
967 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
968 for (a1, a2, b1, b2), t in blocks:
968 for (a1, a2, b1, b2), t in blocks:
969 # Changed blocks ('!') or blocks made only of blank lines ('~')
969 # Changed blocks ('!') or blocks made only of blank lines ('~')
970 # belong to the child.
970 # belong to the child.
971 if t == '=':
971 if t == '=':
972 child[0][b1:b2] = parent[0][a1:a2]
972 child[0][b1:b2] = parent[0][a1:a2]
973 return child
973 return child
974
974
975 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
975 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
976
976
977 def parents(f):
977 def parents(f):
978 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
978 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
979 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
979 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
980 # from the topmost introrev (= srcrev) down to p.linkrev() if it
980 # from the topmost introrev (= srcrev) down to p.linkrev() if it
981 # isn't an ancestor of the srcrev.
981 # isn't an ancestor of the srcrev.
982 f._changeid
982 f._changeid
983 pl = f.parents()
983 pl = f.parents()
984
984
985 # Don't return renamed parents if we aren't following.
985 # Don't return renamed parents if we aren't following.
986 if not follow:
986 if not follow:
987 pl = [p for p in pl if p.path() == f.path()]
987 pl = [p for p in pl if p.path() == f.path()]
988
988
989 # renamed filectx won't have a filelog yet, so set it
989 # renamed filectx won't have a filelog yet, so set it
990 # from the cache to save time
990 # from the cache to save time
991 for p in pl:
991 for p in pl:
992 if not '_filelog' in p.__dict__:
992 if not '_filelog' in p.__dict__:
993 p._filelog = getlog(p.path())
993 p._filelog = getlog(p.path())
994
994
995 return pl
995 return pl
996
996
997 # use linkrev to find the first changeset where self appeared
997 # use linkrev to find the first changeset where self appeared
998 base = self
998 base = self
999 introrev = self.introrev()
999 introrev = self.introrev()
1000 if self.rev() != introrev:
1000 if self.rev() != introrev:
1001 base = self.filectx(self.filenode(), changeid=introrev)
1001 base = self.filectx(self.filenode(), changeid=introrev)
1002 if getattr(base, '_ancestrycontext', None) is None:
1002 if getattr(base, '_ancestrycontext', None) is None:
1003 cl = self._repo.changelog
1003 cl = self._repo.changelog
1004 if introrev is None:
1004 if introrev is None:
1005 # wctx is not inclusive, but works because _ancestrycontext
1005 # wctx is not inclusive, but works because _ancestrycontext
1006 # is used to test filelog revisions
1006 # is used to test filelog revisions
1007 ac = cl.ancestors([p.rev() for p in base.parents()],
1007 ac = cl.ancestors([p.rev() for p in base.parents()],
1008 inclusive=True)
1008 inclusive=True)
1009 else:
1009 else:
1010 ac = cl.ancestors([introrev], inclusive=True)
1010 ac = cl.ancestors([introrev], inclusive=True)
1011 base._ancestrycontext = ac
1011 base._ancestrycontext = ac
1012
1012
1013 # This algorithm would prefer to be recursive, but Python is a
1013 # This algorithm would prefer to be recursive, but Python is a
1014 # bit recursion-hostile. Instead we do an iterative
1014 # bit recursion-hostile. Instead we do an iterative
1015 # depth-first search.
1015 # depth-first search.
1016
1016
1017 # 1st DFS pre-calculates pcache and needed
1017 # 1st DFS pre-calculates pcache and needed
1018 visit = [base]
1018 visit = [base]
1019 pcache = {}
1019 pcache = {}
1020 needed = {base: 1}
1020 needed = {base: 1}
1021 while visit:
1021 while visit:
1022 f = visit.pop()
1022 f = visit.pop()
1023 if f in pcache:
1023 if f in pcache:
1024 continue
1024 continue
1025 pl = parents(f)
1025 pl = parents(f)
1026 pcache[f] = pl
1026 pcache[f] = pl
1027 for p in pl:
1027 for p in pl:
1028 needed[p] = needed.get(p, 0) + 1
1028 needed[p] = needed.get(p, 0) + 1
1029 if p not in pcache:
1029 if p not in pcache:
1030 visit.append(p)
1030 visit.append(p)
1031
1031
1032 # 2nd DFS does the actual annotate
1032 # 2nd DFS does the actual annotate
1033 visit[:] = [base]
1033 visit[:] = [base]
1034 hist = {}
1034 hist = {}
1035 while visit:
1035 while visit:
1036 f = visit[-1]
1036 f = visit[-1]
1037 if f in hist:
1037 if f in hist:
1038 visit.pop()
1038 visit.pop()
1039 continue
1039 continue
1040
1040
1041 ready = True
1041 ready = True
1042 pl = pcache[f]
1042 pl = pcache[f]
1043 for p in pl:
1043 for p in pl:
1044 if p not in hist:
1044 if p not in hist:
1045 ready = False
1045 ready = False
1046 visit.append(p)
1046 visit.append(p)
1047 if ready:
1047 if ready:
1048 visit.pop()
1048 visit.pop()
1049 curr = decorate(f.data(), f)
1049 curr = decorate(f.data(), f)
1050 for p in pl:
1050 for p in pl:
1051 curr = pair(hist[p], curr)
1051 curr = pair(hist[p], curr)
1052 if needed[p] == 1:
1052 if needed[p] == 1:
1053 del hist[p]
1053 del hist[p]
1054 del needed[p]
1054 del needed[p]
1055 else:
1055 else:
1056 needed[p] -= 1
1056 needed[p] -= 1
1057
1057
1058 hist[f] = curr
1058 hist[f] = curr
1059 del pcache[f]
1059 del pcache[f]
1060
1060
1061 return zip(hist[base][0], hist[base][1].splitlines(True))
1061 return zip(hist[base][0], hist[base][1].splitlines(True))
1062
1062
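# Editorial sketch (not part of the original changeset): the shape of the
# value annotate() returns, with hypothetical contexts and file content.
#
#   >>> repo[None]['a'].annotate(follow=True, linenumber=True)
#   [((<filectx ...>, 1), 'first line\n'),
#    ((<filectx ...>, 2), 'second line\n')]
#
# Each entry pairs (introducing filectx, original line number) with the line
# text; with linenumber=False the number slot is simply False.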
1063 def ancestors(self, followfirst=False):
1063 def ancestors(self, followfirst=False):
1064 visit = {}
1064 visit = {}
1065 c = self
1065 c = self
1066 if followfirst:
1066 if followfirst:
1067 cut = 1
1067 cut = 1
1068 else:
1068 else:
1069 cut = None
1069 cut = None
1070
1070
1071 while True:
1071 while True:
1072 for parent in c.parents()[:cut]:
1072 for parent in c.parents()[:cut]:
1073 visit[(parent.linkrev(), parent.filenode())] = parent
1073 visit[(parent.linkrev(), parent.filenode())] = parent
1074 if not visit:
1074 if not visit:
1075 break
1075 break
1076 c = visit.pop(max(visit))
1076 c = visit.pop(max(visit))
1077 yield c
1077 yield c
1078
1078
1079 class filectx(basefilectx):
1079 class filectx(basefilectx):
1080 """A filecontext object makes access to data related to a particular
1080 """A filecontext object makes access to data related to a particular
1081 filerevision convenient."""
1081 filerevision convenient."""
1082 def __init__(self, repo, path, changeid=None, fileid=None,
1082 def __init__(self, repo, path, changeid=None, fileid=None,
1083 filelog=None, changectx=None):
1083 filelog=None, changectx=None):
1084 """changeid can be a changeset revision, node, or tag.
1084 """changeid can be a changeset revision, node, or tag.
1085 fileid can be a file revision or node."""
1085 fileid can be a file revision or node."""
1086 self._repo = repo
1086 self._repo = repo
1087 self._path = path
1087 self._path = path
1088
1088
1089 assert (changeid is not None
1089 assert (changeid is not None
1090 or fileid is not None
1090 or fileid is not None
1091 or changectx is not None), \
1091 or changectx is not None), \
1092 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1092 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1093 % (changeid, fileid, changectx))
1093 % (changeid, fileid, changectx))
1094
1094
1095 if filelog is not None:
1095 if filelog is not None:
1096 self._filelog = filelog
1096 self._filelog = filelog
1097
1097
1098 if changeid is not None:
1098 if changeid is not None:
1099 self._changeid = changeid
1099 self._changeid = changeid
1100 if changectx is not None:
1100 if changectx is not None:
1101 self._changectx = changectx
1101 self._changectx = changectx
1102 if fileid is not None:
1102 if fileid is not None:
1103 self._fileid = fileid
1103 self._fileid = fileid
1104
1104
1105 @propertycache
1105 @propertycache
1106 def _changectx(self):
1106 def _changectx(self):
1107 try:
1107 try:
1108 return changectx(self._repo, self._changeid)
1108 return changectx(self._repo, self._changeid)
1109 except error.FilteredRepoLookupError:
1109 except error.FilteredRepoLookupError:
1110 # Linkrev may point to any revision in the repository. When the
1110 # Linkrev may point to any revision in the repository. When the
1111 # repository is filtered this may lead to `filectx` trying to build
1111 # repository is filtered this may lead to `filectx` trying to build
1112 # `changectx` for a filtered revision. In such a case we fall back to
1112 # `changectx` for a filtered revision. In such a case we fall back to
1113 # creating `changectx` on the unfiltered version of the repository.
1113 # creating `changectx` on the unfiltered version of the repository.
1114 # This fallback should not be an issue because `changectx` from
1114 # This fallback should not be an issue because `changectx` from
1115 # `filectx` are not used in complex operations that care about
1115 # `filectx` are not used in complex operations that care about
1116 # filtering.
1116 # filtering.
1117 #
1117 #
1118 # This fallback is a cheap and dirty fix that prevents several
1118 # This fallback is a cheap and dirty fix that prevents several
1119 # crashes. It does not ensure the behavior is correct. However the
1119 # crashes. It does not ensure the behavior is correct. However the
1120 # behavior was not correct before filtering either, and "incorrect
1120 # behavior was not correct before filtering either, and "incorrect
1121 # behavior" is seen as better than "crash".
1121 # behavior" is seen as better than "crash".
1122 #
1122 #
1123 # Linkrevs have several serious problems with filtering that are
1123 # Linkrevs have several serious problems with filtering that are
1124 # complicated to solve. Proper handling of the issue here should be
1124 # complicated to solve. Proper handling of the issue here should be
1125 # considered when fixing the linkrev issues is on the table.
1125 # considered when fixing the linkrev issues is on the table.
1126 return changectx(self._repo.unfiltered(), self._changeid)
1126 return changectx(self._repo.unfiltered(), self._changeid)
1127
1127
1128 def filectx(self, fileid, changeid=None):
1128 def filectx(self, fileid, changeid=None):
1129 '''opens an arbitrary revision of the file without
1129 '''opens an arbitrary revision of the file without
1130 opening a new filelog'''
1130 opening a new filelog'''
1131 return filectx(self._repo, self._path, fileid=fileid,
1131 return filectx(self._repo, self._path, fileid=fileid,
1132 filelog=self._filelog, changeid=changeid)
1132 filelog=self._filelog, changeid=changeid)
1133
1133
1134 def rawdata(self):
1134 def rawdata(self):
1135 return self._filelog.revision(self._filenode, raw=True)
1135 return self._filelog.revision(self._filenode, raw=True)
1136
1136
1137 def data(self):
1137 def data(self):
1138 try:
1138 try:
1139 return self._filelog.read(self._filenode)
1139 return self._filelog.read(self._filenode)
1140 except error.CensoredNodeError:
1140 except error.CensoredNodeError:
1141 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1141 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1142 return ""
1142 return ""
1143 raise error.Abort(_("censored node: %s") % short(self._filenode),
1143 raise error.Abort(_("censored node: %s") % short(self._filenode),
1144 hint=_("set censor.policy to ignore errors"))
1144 hint=_("set censor.policy to ignore errors"))
1145
1145
1146 def size(self):
1146 def size(self):
1147 return self._filelog.size(self._filerev)
1147 return self._filelog.size(self._filerev)
1148
1148
1149 def renamed(self):
1149 def renamed(self):
1150 """check if file was actually renamed in this changeset revision
1150 """check if file was actually renamed in this changeset revision
1151
1151
1152 If the rename is logged in the file revision, we report a copy for the changeset only
1152 If the rename is logged in the file revision, we report a copy for the changeset only
1153 if the file revision's linkrev points back to the changeset in question
1153 if the file revision's linkrev points back to the changeset in question
1154 or both changeset parents contain different file revisions.
1154 or both changeset parents contain different file revisions.
1155 """
1155 """
1156
1156
1157 renamed = self._filelog.renamed(self._filenode)
1157 renamed = self._filelog.renamed(self._filenode)
1158 if not renamed:
1158 if not renamed:
1159 return renamed
1159 return renamed
1160
1160
1161 if self.rev() == self.linkrev():
1161 if self.rev() == self.linkrev():
1162 return renamed
1162 return renamed
1163
1163
1164 name = self.path()
1164 name = self.path()
1165 fnode = self._filenode
1165 fnode = self._filenode
1166 for p in self._changectx.parents():
1166 for p in self._changectx.parents():
1167 try:
1167 try:
1168 if fnode == p.filenode(name):
1168 if fnode == p.filenode(name):
1169 return None
1169 return None
1170 except error.LookupError:
1170 except error.LookupError:
1171 pass
1171 pass
1172 return renamed
1172 return renamed
1173
1173
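# Editorial sketch (not part of the original changeset): renamed() returns
# either a falsy value or a (source path, source filenode) pair, so callers
# normally unpack it only after a truth test. The file name is hypothetical.
#
#   >>> r = fctx.renamed()
#   >>> if r:
#   ...     oldpath, oldnode = r   # e.g. ('old-name.txt', <20-byte node>)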
1174 def children(self):
1174 def children(self):
1175 # hard for renames
1175 # hard for renames
1176 c = self._filelog.children(self._filenode)
1176 c = self._filelog.children(self._filenode)
1177 return [filectx(self._repo, self._path, fileid=x,
1177 return [filectx(self._repo, self._path, fileid=x,
1178 filelog=self._filelog) for x in c]
1178 filelog=self._filelog) for x in c]
1179
1179
1180 def _changesrange(fctx1, fctx2, linerange2, diffopts):
1180 def _changesrange(fctx1, fctx2, linerange2, diffopts):
1181 """Return `(diffinrange, linerange1)` where `diffinrange` is True
1181 """Return `(diffinrange, linerange1)` where `diffinrange` is True
1182 if diff from fctx2 to fctx1 has changes in linerange2 and
1182 if diff from fctx2 to fctx1 has changes in linerange2 and
1183 `linerange1` is the new line range for fctx1.
1183 `linerange1` is the new line range for fctx1.
1184 """
1184 """
1185 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
1185 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
1186 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
1186 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
1187 diffinrange = any(stype == '!' for _, stype in filteredblocks)
1187 diffinrange = any(stype == '!' for _, stype in filteredblocks)
1188 return diffinrange, linerange1
1188 return diffinrange, linerange1
1189
1189
1190 def blockancestors(fctx, fromline, toline, followfirst=False):
1190 def blockancestors(fctx, fromline, toline, followfirst=False):
1191 """Yield ancestors of `fctx` with respect to the block of lines within
1191 """Yield ancestors of `fctx` with respect to the block of lines within
1192 `fromline`-`toline` range.
1192 `fromline`-`toline` range.
1193 """
1193 """
1194 diffopts = patch.diffopts(fctx._repo.ui)
1194 diffopts = patch.diffopts(fctx._repo.ui)
1195 introrev = fctx.introrev()
1195 introrev = fctx.introrev()
1196 if fctx.rev() != introrev:
1196 if fctx.rev() != introrev:
1197 fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
1197 fctx = fctx.filectx(fctx.filenode(), changeid=introrev)
1198 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
1198 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
1199 while visit:
1199 while visit:
1200 c, linerange2 = visit.pop(max(visit))
1200 c, linerange2 = visit.pop(max(visit))
1201 pl = c.parents()
1201 pl = c.parents()
1202 if followfirst:
1202 if followfirst:
1203 pl = pl[:1]
1203 pl = pl[:1]
1204 if not pl:
1204 if not pl:
1205 # The block originates from the initial revision.
1205 # The block originates from the initial revision.
1206 yield c, linerange2
1206 yield c, linerange2
1207 continue
1207 continue
1208 inrange = False
1208 inrange = False
1209 for p in pl:
1209 for p in pl:
1210 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
1210 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
1211 inrange = inrange or inrangep
1211 inrange = inrange or inrangep
1212 if linerange1[0] == linerange1[1]:
1212 if linerange1[0] == linerange1[1]:
1213 # Parent's linerange is empty, meaning that the block got
1213 # Parent's linerange is empty, meaning that the block got
1214 # introduced in this revision; no need to go further in this
1214 # introduced in this revision; no need to go further in this
1215 # branch.
1215 # branch.
1216 continue
1216 continue
1217 # Set _descendantrev with 'c' (a known descendant) so that, when
1217 # Set _descendantrev with 'c' (a known descendant) so that, when
1218 # _adjustlinkrev is called for 'p', it receives this descendant
1218 # _adjustlinkrev is called for 'p', it receives this descendant
1219 # (as srcrev) instead of possibly the topmost introrev.
1219 # (as srcrev) instead of possibly the topmost introrev.
1220 p._descendantrev = c.rev()
1220 p._descendantrev = c.rev()
1221 visit[p.linkrev(), p.filenode()] = p, linerange1
1221 visit[p.linkrev(), p.filenode()] = p, linerange1
1222 if inrange:
1222 if inrange:
1223 yield c, linerange2
1223 yield c, linerange2
1224
1224
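# Editorial sketch (not part of the original changeset): blockancestors()
# yields (filectx, (fromline, toline)) pairs, roughly from newest to oldest,
# which is what line-range history queries can be built on. The file name
# and line range below are hypothetical.
#
#   >>> for c, (lo, hi) in blockancestors(repo['tip']['a'], 3, 8):
#   ...     print(c.rev(), lo, hi)   # each ancestor touching lines 3-8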
1225 def blockdescendants(fctx, fromline, toline):
1225 def blockdescendants(fctx, fromline, toline):
1226 """Yield descendants of `fctx` with respect to the block of lines within
1226 """Yield descendants of `fctx` with respect to the block of lines within
1227 `fromline`-`toline` range.
1227 `fromline`-`toline` range.
1228 """
1228 """
1229 # First possibly yield 'fctx' if it has changes in range with respect to
1229 # First possibly yield 'fctx' if it has changes in range with respect to
1230 # its parents.
1230 # its parents.
1231 try:
1231 try:
1232 c, linerange1 = next(blockancestors(fctx, fromline, toline))
1232 c, linerange1 = next(blockancestors(fctx, fromline, toline))
1233 except StopIteration:
1233 except StopIteration:
1234 pass
1234 pass
1235 else:
1235 else:
1236 if c == fctx:
1236 if c == fctx:
1237 yield c, linerange1
1237 yield c, linerange1
1238
1238
1239 diffopts = patch.diffopts(fctx._repo.ui)
1239 diffopts = patch.diffopts(fctx._repo.ui)
1240 fl = fctx.filelog()
1240 fl = fctx.filelog()
1241 seen = {fctx.filerev(): (fctx, (fromline, toline))}
1241 seen = {fctx.filerev(): (fctx, (fromline, toline))}
1242 for i in fl.descendants([fctx.filerev()]):
1242 for i in fl.descendants([fctx.filerev()]):
1243 c = fctx.filectx(i)
1243 c = fctx.filectx(i)
1244 inrange = False
1244 inrange = False
1245 for x in fl.parentrevs(i):
1245 for x in fl.parentrevs(i):
1246 try:
1246 try:
1247 p, linerange2 = seen[x]
1247 p, linerange2 = seen[x]
1248 except KeyError:
1248 except KeyError:
1249 # nullrev or other branch
1249 # nullrev or other branch
1250 continue
1250 continue
1251 inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
1251 inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts)
1252 inrange = inrange or inrangep
1252 inrange = inrange or inrangep
1253 # If revision 'i' has been seen (it's a merge), we assume that its
1253 # If revision 'i' has been seen (it's a merge), we assume that its
1254 # line range is the same independently of which parent was used
1254 # line range is the same independently of which parent was used
1255 # to compute it.
1255 # to compute it.
1256 assert i not in seen or seen[i][1] == linerange1, (
1256 assert i not in seen or seen[i][1] == linerange1, (
1257 'computed line range for %s is not consistent between '
1257 'computed line range for %s is not consistent between '
1258 'ancestor branches' % c)
1258 'ancestor branches' % c)
1259 seen[i] = c, linerange1
1259 seen[i] = c, linerange1
1260 if inrange:
1260 if inrange:
1261 yield c, linerange1
1261 yield c, linerange1
1262
1262
1263 class committablectx(basectx):
1263 class committablectx(basectx):
1264 """A committablectx object provides common functionality for a context that
1264 """A committablectx object provides common functionality for a context that
1265 wants the ability to commit, e.g. workingctx or memctx."""
1265 wants the ability to commit, e.g. workingctx or memctx."""
1266 def __init__(self, repo, text="", user=None, date=None, extra=None,
1266 def __init__(self, repo, text="", user=None, date=None, extra=None,
1267 changes=None):
1267 changes=None):
1268 self._repo = repo
1268 self._repo = repo
1269 self._rev = None
1269 self._rev = None
1270 self._node = None
1270 self._node = None
1271 self._text = text
1271 self._text = text
1272 if date:
1272 if date:
1273 self._date = util.parsedate(date)
1273 self._date = util.parsedate(date)
1274 if user:
1274 if user:
1275 self._user = user
1275 self._user = user
1276 if changes:
1276 if changes:
1277 self._status = changes
1277 self._status = changes
1278
1278
1279 self._extra = {}
1279 self._extra = {}
1280 if extra:
1280 if extra:
1281 self._extra = extra.copy()
1281 self._extra = extra.copy()
1282 if 'branch' not in self._extra:
1282 if 'branch' not in self._extra:
1283 try:
1283 try:
1284 branch = encoding.fromlocal(self._repo.dirstate.branch())
1284 branch = encoding.fromlocal(self._repo.dirstate.branch())
1285 except UnicodeDecodeError:
1285 except UnicodeDecodeError:
1286 raise error.Abort(_('branch name not in UTF-8!'))
1286 raise error.Abort(_('branch name not in UTF-8!'))
1287 self._extra['branch'] = branch
1287 self._extra['branch'] = branch
1288 if self._extra['branch'] == '':
1288 if self._extra['branch'] == '':
1289 self._extra['branch'] = 'default'
1289 self._extra['branch'] = 'default'
1290
1290
1291 def __str__(self):
1291 def __str__(self):
1292 return str(self._parents[0]) + "+"
1292 return str(self._parents[0]) + "+"
1293
1293
1294 def __nonzero__(self):
1294 def __nonzero__(self):
1295 return True
1295 return True
1296
1296
1297 __bool__ = __nonzero__
1297 __bool__ = __nonzero__
1298
1298
1299 def _buildflagfunc(self):
1299 def _buildflagfunc(self):
1300 # Create a fallback function for getting file flags when the
1300 # Create a fallback function for getting file flags when the
1301 # filesystem doesn't support them
1301 # filesystem doesn't support them
1302
1302
1303 copiesget = self._repo.dirstate.copies().get
1303 copiesget = self._repo.dirstate.copies().get
1304 parents = self.parents()
1304 parents = self.parents()
1305 if len(parents) < 2:
1305 if len(parents) < 2:
1306 # when we have one parent, it's easy: copy from parent
1306 # when we have one parent, it's easy: copy from parent
1307 man = parents[0].manifest()
1307 man = parents[0].manifest()
1308 def func(f):
1308 def func(f):
1309 f = copiesget(f, f)
1309 f = copiesget(f, f)
1310 return man.flags(f)
1310 return man.flags(f)
1311 else:
1311 else:
1312 # merges are tricky: we try to reconstruct the unstored
1312 # merges are tricky: we try to reconstruct the unstored
1313 # result from the merge (issue1802)
1313 # result from the merge (issue1802)
1314 p1, p2 = parents
1314 p1, p2 = parents
1315 pa = p1.ancestor(p2)
1315 pa = p1.ancestor(p2)
1316 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1316 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1317
1317
1318 def func(f):
1318 def func(f):
1319 f = copiesget(f, f) # may be wrong for merges with copies
1319 f = copiesget(f, f) # may be wrong for merges with copies
1320 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1320 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1321 if fl1 == fl2:
1321 if fl1 == fl2:
1322 return fl1
1322 return fl1
1323 if fl1 == fla:
1323 if fl1 == fla:
1324 return fl2
1324 return fl2
1325 if fl2 == fla:
1325 if fl2 == fla:
1326 return fl1
1326 return fl1
1327 return '' # punt for conflicts
1327 return '' # punt for conflicts
1328
1328
1329 return func
1329 return func
1330
1330
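# Editorial note (not part of the original changeset): a summary of the
# three-way flag resolution above ('l' = symlink, 'x' = executable,
# '' = regular file).
#
#   fl1 (p1)   fl2 (p2)   fla (ancestor)   result
#   'x'        'x'        ''               'x'   (both sides agree)
#   'x'        ''         ''               'x'   (only p1 changed the flag)
#   ''         'x'        ''               'x'   (only p2 changed the flag)
#   'x'        'l'        ''               ''    (conflict: punt)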
1331 @propertycache
1331 @propertycache
1332 def _flagfunc(self):
1332 def _flagfunc(self):
1333 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1333 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1334
1334
1335 @propertycache
1335 @propertycache
1336 def _status(self):
1336 def _status(self):
1337 return self._repo.status()
1337 return self._repo.status()
1338
1338
1339 @propertycache
1339 @propertycache
1340 def _user(self):
1340 def _user(self):
1341 return self._repo.ui.username()
1341 return self._repo.ui.username()
1342
1342
1343 @propertycache
1343 @propertycache
1344 def _date(self):
1344 def _date(self):
1345 return util.makedate()
1345 return util.makedate()
1346
1346
1347 def subrev(self, subpath):
1347 def subrev(self, subpath):
1348 return None
1348 return None
1349
1349
1350 def manifestnode(self):
1350 def manifestnode(self):
1351 return None
1351 return None
1352 def user(self):
1352 def user(self):
1353 return self._user or self._repo.ui.username()
1353 return self._user or self._repo.ui.username()
1354 def date(self):
1354 def date(self):
1355 return self._date
1355 return self._date
1356 def description(self):
1356 def description(self):
1357 return self._text
1357 return self._text
1358 def files(self):
1358 def files(self):
1359 return sorted(self._status.modified + self._status.added +
1359 return sorted(self._status.modified + self._status.added +
1360 self._status.removed)
1360 self._status.removed)
1361
1361
1362 def modified(self):
1362 def modified(self):
1363 return self._status.modified
1363 return self._status.modified
1364 def added(self):
1364 def added(self):
1365 return self._status.added
1365 return self._status.added
1366 def removed(self):
1366 def removed(self):
1367 return self._status.removed
1367 return self._status.removed
1368 def deleted(self):
1368 def deleted(self):
1369 return self._status.deleted
1369 return self._status.deleted
1370 def branch(self):
1370 def branch(self):
1371 return encoding.tolocal(self._extra['branch'])
1371 return encoding.tolocal(self._extra['branch'])
1372 def closesbranch(self):
1372 def closesbranch(self):
1373 return 'close' in self._extra
1373 return 'close' in self._extra
1374 def extra(self):
1374 def extra(self):
1375 return self._extra
1375 return self._extra
1376
1376
1377 def tags(self):
1377 def tags(self):
1378 return []
1378 return []
1379
1379
1380 def bookmarks(self):
1380 def bookmarks(self):
1381 b = []
1381 b = []
1382 for p in self.parents():
1382 for p in self.parents():
1383 b.extend(p.bookmarks())
1383 b.extend(p.bookmarks())
1384 return b
1384 return b
1385
1385
1386 def phase(self):
1386 def phase(self):
1387 phase = phases.draft # default phase to draft
1387 phase = phases.draft # default phase to draft
1388 for p in self.parents():
1388 for p in self.parents():
1389 phase = max(phase, p.phase())
1389 phase = max(phase, p.phase())
1390 return phase
1390 return phase
1391
1391
1392 def hidden(self):
1392 def hidden(self):
1393 return False
1393 return False
1394
1394
1395 def children(self):
1395 def children(self):
1396 return []
1396 return []
1397
1397
1398 def flags(self, path):
1398 def flags(self, path):
1399 if '_manifest' in self.__dict__:
1399 if '_manifest' in self.__dict__:
1400 try:
1400 try:
1401 return self._manifest.flags(path)
1401 return self._manifest.flags(path)
1402 except KeyError:
1402 except KeyError:
1403 return ''
1403 return ''
1404
1404
1405 try:
1405 try:
1406 return self._flagfunc(path)
1406 return self._flagfunc(path)
1407 except OSError:
1407 except OSError:
1408 return ''
1408 return ''
1409
1409
1410 def ancestor(self, c2):
1410 def ancestor(self, c2):
1411 """return the "best" ancestor context of self and c2"""
1411 """return the "best" ancestor context of self and c2"""
1412 return self._parents[0].ancestor(c2) # punt on two parents for now
1412 return self._parents[0].ancestor(c2) # punt on two parents for now
1413
1413
1414 def walk(self, match):
1414 def walk(self, match):
1415 '''Generates matching file names.'''
1415 '''Generates matching file names.'''
1416 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1416 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1417 True, False))
1417 True, False))
1418
1418
1419 def matches(self, match):
1419 def matches(self, match):
1420 return sorted(self._repo.dirstate.matches(match))
1420 return sorted(self._repo.dirstate.matches(match))
1421
1421
1422 def ancestors(self):
1422 def ancestors(self):
1423 for p in self._parents:
1423 for p in self._parents:
1424 yield p
1424 yield p
1425 for a in self._repo.changelog.ancestors(
1425 for a in self._repo.changelog.ancestors(
1426 [p.rev() for p in self._parents]):
1426 [p.rev() for p in self._parents]):
1427 yield changectx(self._repo, a)
1427 yield changectx(self._repo, a)
1428
1428
1429 def markcommitted(self, node):
1429 def markcommitted(self, node):
1430 """Perform post-commit cleanup necessary after committing this ctx
1430 """Perform post-commit cleanup necessary after committing this ctx
1431
1431
1432 Specifically, this updates backing stores this working context
1432 Specifically, this updates backing stores this working context
1433 wraps to reflect the fact that the changes reflected by this
1433 wraps to reflect the fact that the changes reflected by this
1434 workingctx have been committed. For example, it marks
1434 workingctx have been committed. For example, it marks
1435 modified and added files as normal in the dirstate.
1435 modified and added files as normal in the dirstate.
1436
1436
1437 """
1437 """
1438
1438
1439 self._repo.dirstate.beginparentchange()
1439 self._repo.dirstate.beginparentchange()
1440 for f in self.modified() + self.added():
1440 for f in self.modified() + self.added():
1441 self._repo.dirstate.normal(f)
1441 self._repo.dirstate.normal(f)
1442 for f in self.removed():
1442 for f in self.removed():
1443 self._repo.dirstate.drop(f)
1443 self._repo.dirstate.drop(f)
1444 self._repo.dirstate.setparents(node)
1444 self._repo.dirstate.setparents(node)
1445 self._repo.dirstate.endparentchange()
1445 self._repo.dirstate.endparentchange()
1446
1446
1447 # write changes out explicitly, because nesting wlock at
1447 # write changes out explicitly, because nesting wlock at
1448 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1448 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1449 # from immediately doing so for subsequent changing files
1449 # from immediately doing so for subsequent changing files
1450 self._repo.dirstate.write(self._repo.currenttransaction())
1450 self._repo.dirstate.write(self._repo.currenttransaction())
1451
1451
1452 class workingctx(committablectx):
1452 class workingctx(committablectx):
1453 """A workingctx object makes access to data related to
1453 """A workingctx object makes access to data related to
1454 the current working directory convenient.
1454 the current working directory convenient.
1455 date - any valid date string or (unixtime, offset), or None.
1455 date - any valid date string or (unixtime, offset), or None.
1456 user - username string, or None.
1456 user - username string, or None.
1457 extra - a dictionary of extra values, or None.
1457 extra - a dictionary of extra values, or None.
1458 changes - a list of file lists as returned by localrepo.status()
1458 changes - a list of file lists as returned by localrepo.status()
1459 or None to use the repository status.
1459 or None to use the repository status.
1460 """
1460 """
1461 def __init__(self, repo, text="", user=None, date=None, extra=None,
1461 def __init__(self, repo, text="", user=None, date=None, extra=None,
1462 changes=None):
1462 changes=None):
1463 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1463 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1464
1464
1465 def __iter__(self):
1465 def __iter__(self):
1466 d = self._repo.dirstate
1466 d = self._repo.dirstate
1467 for f in d:
1467 for f in d:
1468 if d[f] != 'r':
1468 if d[f] != 'r':
1469 yield f
1469 yield f
1470
1470
1471 def __contains__(self, key):
1471 def __contains__(self, key):
1472 return self._repo.dirstate[key] not in "?r"
1472 return self._repo.dirstate[key] not in "?r"
1473
1473
1474 def hex(self):
1474 def hex(self):
1475 return hex(wdirid)
1475 return hex(wdirid)
1476
1476
1477 @propertycache
1477 @propertycache
1478 def _parents(self):
1478 def _parents(self):
1479 p = self._repo.dirstate.parents()
1479 p = self._repo.dirstate.parents()
1480 if p[1] == nullid:
1480 if p[1] == nullid:
1481 p = p[:-1]
1481 p = p[:-1]
1482 return [changectx(self._repo, x) for x in p]
1482 return [changectx(self._repo, x) for x in p]
1483
1483
1484 def filectx(self, path, filelog=None):
1484 def filectx(self, path, filelog=None):
1485 """get a file context from the working directory"""
1485 """get a file context from the working directory"""
1486 return workingfilectx(self._repo, path, workingctx=self,
1486 return workingfilectx(self._repo, path, workingctx=self,
1487 filelog=filelog)
1487 filelog=filelog)
1488
1488
1489 def dirty(self, missing=False, merge=True, branch=True):
1489 def dirty(self, missing=False, merge=True, branch=True):
1490 "check whether a working directory is modified"
1490 "check whether a working directory is modified"
1491 # check subrepos first
1491 # check subrepos first
1492 for s in sorted(self.substate):
1492 for s in sorted(self.substate):
1493 if self.sub(s).dirty():
1493 if self.sub(s).dirty():
1494 return True
1494 return True
1495 # check current working dir
1495 # check current working dir
1496 return ((merge and self.p2()) or
1496 return ((merge and self.p2()) or
1497 (branch and self.branch() != self.p1().branch()) or
1497 (branch and self.branch() != self.p1().branch()) or
1498 self.modified() or self.added() or self.removed() or
1498 self.modified() or self.added() or self.removed() or
1499 (missing and self.deleted()))
1499 (missing and self.deleted()))
1500
1500
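# Editorial sketch (not part of the original changeset): typical dirty() calls.
#
#   >>> repo[None].dirty()                   # any uncommitted change at all?
#   >>> repo[None].dirty(missing=True)       # also count files deleted on disk
#   >>> repo[None].dirty(merge=False, branch=False)  # ignore pending merge and
#   ...                                              # branch-name changes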
1501 def add(self, list, prefix=""):
1501 def add(self, list, prefix=""):
1502 join = lambda f: os.path.join(prefix, f)
1502 join = lambda f: os.path.join(prefix, f)
1503 with self._repo.wlock():
1503 with self._repo.wlock():
1504 ui, ds = self._repo.ui, self._repo.dirstate
1504 ui, ds = self._repo.ui, self._repo.dirstate
1505 rejected = []
1505 rejected = []
1506 lstat = self._repo.wvfs.lstat
1506 lstat = self._repo.wvfs.lstat
1507 for f in list:
1507 for f in list:
1508 scmutil.checkportable(ui, join(f))
1508 scmutil.checkportable(ui, join(f))
1509 try:
1509 try:
1510 st = lstat(f)
1510 st = lstat(f)
1511 except OSError:
1511 except OSError:
1512 ui.warn(_("%s does not exist!\n") % join(f))
1512 ui.warn(_("%s does not exist!\n") % join(f))
1513 rejected.append(f)
1513 rejected.append(f)
1514 continue
1514 continue
1515 if st.st_size > 10000000:
1515 if st.st_size > 10000000:
1516 ui.warn(_("%s: up to %d MB of RAM may be required "
1516 ui.warn(_("%s: up to %d MB of RAM may be required "
1517 "to manage this file\n"
1517 "to manage this file\n"
1518 "(use 'hg revert %s' to cancel the "
1518 "(use 'hg revert %s' to cancel the "
1519 "pending addition)\n")
1519 "pending addition)\n")
1520 % (f, 3 * st.st_size // 1000000, join(f)))
1520 % (f, 3 * st.st_size // 1000000, join(f)))
1521 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1521 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1522 ui.warn(_("%s not added: only files and symlinks "
1522 ui.warn(_("%s not added: only files and symlinks "
1523 "supported currently\n") % join(f))
1523 "supported currently\n") % join(f))
1524 rejected.append(f)
1524 rejected.append(f)
1525 elif ds[f] in 'amn':
1525 elif ds[f] in 'amn':
1526 ui.warn(_("%s already tracked!\n") % join(f))
1526 ui.warn(_("%s already tracked!\n") % join(f))
1527 elif ds[f] == 'r':
1527 elif ds[f] == 'r':
1528 ds.normallookup(f)
1528 ds.normallookup(f)
1529 else:
1529 else:
1530 ds.add(f)
1530 ds.add(f)
1531 return rejected
1531 return rejected
1532
1532
1533 def forget(self, files, prefix=""):
1533 def forget(self, files, prefix=""):
1534 join = lambda f: os.path.join(prefix, f)
1534 join = lambda f: os.path.join(prefix, f)
1535 with self._repo.wlock():
1535 with self._repo.wlock():
1536 rejected = []
1536 rejected = []
1537 for f in files:
1537 for f in files:
1538 if f not in self._repo.dirstate:
1538 if f not in self._repo.dirstate:
1539 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1539 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1540 rejected.append(f)
1540 rejected.append(f)
1541 elif self._repo.dirstate[f] != 'a':
1541 elif self._repo.dirstate[f] != 'a':
1542 self._repo.dirstate.remove(f)
1542 self._repo.dirstate.remove(f)
1543 else:
1543 else:
1544 self._repo.dirstate.drop(f)
1544 self._repo.dirstate.drop(f)
1545 return rejected
1545 return rejected
1546
1546
1547 def undelete(self, list):
1547 def undelete(self, list):
1548 pctxs = self.parents()
1548 pctxs = self.parents()
1549 with self._repo.wlock():
1549 with self._repo.wlock():
1550 for f in list:
1550 for f in list:
1551 if self._repo.dirstate[f] != 'r':
1551 if self._repo.dirstate[f] != 'r':
1552 self._repo.ui.warn(_("%s not removed!\n") % f)
1552 self._repo.ui.warn(_("%s not removed!\n") % f)
1553 else:
1553 else:
1554 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1554 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1555 t = fctx.data()
1555 t = fctx.data()
1556 self._repo.wwrite(f, t, fctx.flags())
1556 self._repo.wwrite(f, t, fctx.flags())
1557 self._repo.dirstate.normal(f)
1557 self._repo.dirstate.normal(f)
1558
1558
1559 def copy(self, source, dest):
1559 def copy(self, source, dest):
1560 try:
1560 try:
1561 st = self._repo.wvfs.lstat(dest)
1561 st = self._repo.wvfs.lstat(dest)
1562 except OSError as err:
1562 except OSError as err:
1563 if err.errno != errno.ENOENT:
1563 if err.errno != errno.ENOENT:
1564 raise
1564 raise
1565 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1565 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1566 return
1566 return
1567 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1567 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1568 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1568 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1569 "symbolic link\n") % dest)
1569 "symbolic link\n") % dest)
1570 else:
1570 else:
1571 with self._repo.wlock():
1571 with self._repo.wlock():
1572 if self._repo.dirstate[dest] in '?':
1572 if self._repo.dirstate[dest] in '?':
1573 self._repo.dirstate.add(dest)
1573 self._repo.dirstate.add(dest)
1574 elif self._repo.dirstate[dest] in 'r':
1574 elif self._repo.dirstate[dest] in 'r':
1575 self._repo.dirstate.normallookup(dest)
1575 self._repo.dirstate.normallookup(dest)
1576 self._repo.dirstate.copy(source, dest)
1576 self._repo.dirstate.copy(source, dest)
1577
1577
1578 def match(self, pats=None, include=None, exclude=None, default='glob',
1578 def match(self, pats=None, include=None, exclude=None, default='glob',
1579 listsubrepos=False, badfn=None):
1579 listsubrepos=False, badfn=None):
1580 if pats is None:
1580 if pats is None:
1581 pats = []
1581 pats = []
1582 r = self._repo
1582 r = self._repo
1583
1583
1584 # Only a case insensitive filesystem needs magic to translate user input
1584 # Only a case insensitive filesystem needs magic to translate user input
1585 # to actual case in the filesystem.
1585 # to actual case in the filesystem.
1586 matcherfunc = matchmod.match
1586 matcherfunc = matchmod.match
1587 if not util.fscasesensitive(r.root):
1587 if not util.fscasesensitive(r.root):
1588 matcherfunc = matchmod.icasefsmatcher
1588 matcherfunc = matchmod.icasefsmatcher
1589 return matcherfunc(r.root, r.getcwd(), pats,
1589 return matcherfunc(r.root, r.getcwd(), pats,
1590 include, exclude, default,
1590 include, exclude, default,
1591 auditor=r.auditor, ctx=self,
1591 auditor=r.auditor, ctx=self,
1592 listsubrepos=listsubrepos, badfn=badfn)
1592 listsubrepos=listsubrepos, badfn=badfn)
1593
1593
1594 def _filtersuspectsymlink(self, files):
1594 def _filtersuspectsymlink(self, files):
1595 if not files or self._repo.dirstate._checklink:
1595 if not files or self._repo.dirstate._checklink:
1596 return files
1596 return files
1597
1597
1598 # Symlink placeholders may get non-symlink-like contents
1598 # Symlink placeholders may get non-symlink-like contents
1599 # via user error or dereferencing by NFS or Samba servers,
1599 # via user error or dereferencing by NFS or Samba servers,
1600 # so we filter out any placeholders that don't look like a
1600 # so we filter out any placeholders that don't look like a
1601 # symlink
1601 # symlink
1602 sane = []
1602 sane = []
1603 for f in files:
1603 for f in files:
1604 if self.flags(f) == 'l':
1604 if self.flags(f) == 'l':
1605 d = self[f].data()
1605 d = self[f].data()
1606 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1606 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1607 self._repo.ui.debug('ignoring suspect symlink placeholder'
1607 self._repo.ui.debug('ignoring suspect symlink placeholder'
1608 ' "%s"\n' % f)
1608 ' "%s"\n' % f)
1609 continue
1609 continue
1610 sane.append(f)
1610 sane.append(f)
1611 return sane
1611 return sane
1612
1612
1613 def _checklookup(self, files):
1613 def _checklookup(self, files):
1614 # check for any possibly clean files
1614 # check for any possibly clean files
1615 if not files:
1615 if not files:
1616 return [], []
1616 return [], [], []
1617
1617
1618 modified = []
1618 modified = []
1619 deleted = []
1619 fixup = []
1620 fixup = []
1620 pctx = self._parents[0]
1621 pctx = self._parents[0]
1621 # do a full compare of any files that might have changed
1622 # do a full compare of any files that might have changed
1622 for f in sorted(files):
1623 for f in sorted(files):
1624 try:
1625 # This will return True for a file that got replaced by a
1626 # directory in the interim, but fixing that is pretty hard.
1623 if (f not in pctx or self.flags(f) != pctx.flags(f)
1627 if (f not in pctx or self.flags(f) != pctx.flags(f)
1624 or pctx[f].cmp(self[f])):
1628 or pctx[f].cmp(self[f])):
1625 modified.append(f)
1629 modified.append(f)
1626 else:
1630 else:
1627 fixup.append(f)
1631 fixup.append(f)
1632 except (IOError, OSError):
1633 # A file became inaccessible in between? Mark it as deleted,
1634 # matching dirstate behavior (issue5584).
1635 # The dirstate has more complex behavior around whether a
1636 # missing file matches a directory, etc, but we don't need to
1637 # bother with that: if f has made it to this point, we're sure
1638 # it's in the dirstate.
1639 deleted.append(f)
1628
1640
1629 # update dirstate for files that are actually clean
1641 # update dirstate for files that are actually clean
1630 if fixup:
1642 if fixup:
1631 try:
1643 try:
1632 # updating the dirstate is optional
1644 # updating the dirstate is optional
1633 # so we don't wait on the lock
1645 # so we don't wait on the lock
1634 # wlock can invalidate the dirstate, so cache normal _after_
1646 # wlock can invalidate the dirstate, so cache normal _after_
1635 # taking the lock
1647 # taking the lock
1636 with self._repo.wlock(False):
1648 with self._repo.wlock(False):
1637 normal = self._repo.dirstate.normal
1649 normal = self._repo.dirstate.normal
1638 for f in fixup:
1650 for f in fixup:
1639 normal(f)
1651 normal(f)
1640 # write changes out explicitly, because nesting
1652 # write changes out explicitly, because nesting
1641 # wlock at runtime may prevent 'wlock.release()'
1653 # wlock at runtime may prevent 'wlock.release()'
1642 # after this block from doing so for subsequent
1654 # after this block from doing so for subsequent
1643 # changing files
1655 # changing files
1644 self._repo.dirstate.write(self._repo.currenttransaction())
1656 self._repo.dirstate.write(self._repo.currenttransaction())
1645 except error.LockError:
1657 except error.LockError:
1646 pass
1658 pass
1647 return modified, fixup
1659 return modified, deleted, fixup
1648
1660
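# Editorial sketch (not part of the original changeset): with the fix above,
# _checklookup() reports three buckets instead of two, so a file that vanishes
# between the dirstate walk and the content comparison is reported as deleted
# (matching issue5584) instead of raising. Hypothetical call:
#
#   >>> modified, deleted, fixup = wctx._checklookup(['a', 'b', 'c'])
#   >>> # 'a' changed content, 'b' disappeared from disk, 'c' is clean
#   >>> (modified, deleted, fixup)
#   (['a'], ['b'], ['c'])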
1649 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1661 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1650 unknown=False):
1662 unknown=False):
1651 '''Gets the status from the dirstate -- internal use only.'''
1663 '''Gets the status from the dirstate -- internal use only.'''
1652 listignored, listclean, listunknown = ignored, clean, unknown
1664 listignored, listclean, listunknown = ignored, clean, unknown
1653 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1665 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1654 subrepos = []
1666 subrepos = []
1655 if '.hgsub' in self:
1667 if '.hgsub' in self:
1656 subrepos = sorted(self.substate)
1668 subrepos = sorted(self.substate)
1657 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1669 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1658 listclean, listunknown)
1670 listclean, listunknown)
1659
1671
1660 # check for any possibly clean files
1672 # check for any possibly clean files
1661 if cmp:
1673 if cmp:
1662 modified2, fixup = self._checklookup(cmp)
1674 modified2, deleted2, fixup = self._checklookup(cmp)
1663 s.modified.extend(modified2)
1675 s.modified.extend(modified2)
1676 s.deleted.extend(deleted2)
1664
1677
1665 # update dirstate for files that are actually clean
1678 # update dirstate for files that are actually clean
1666 if fixup and listclean:
1679 if fixup and listclean:
1667 s.clean.extend(fixup)
1680 s.clean.extend(fixup)
1668
1681
1669 if match.always():
1682 if match.always():
1670 # cache for performance
1683 # cache for performance
1671 if s.unknown or s.ignored or s.clean:
1684 if s.unknown or s.ignored or s.clean:
1672 # "_status" is cached with list*=False in the normal route
1685 # "_status" is cached with list*=False in the normal route
1673 self._status = scmutil.status(s.modified, s.added, s.removed,
1686 self._status = scmutil.status(s.modified, s.added, s.removed,
1674 s.deleted, [], [], [])
1687 s.deleted, [], [], [])
1675 else:
1688 else:
1676 self._status = s
1689 self._status = s
1677
1690
1678 return s
1691 return s
1679
1692
1680 @propertycache
1693 @propertycache
1681 def _manifest(self):
1694 def _manifest(self):
1682 """generate a manifest corresponding to the values in self._status
1695 """generate a manifest corresponding to the values in self._status
1683
1696
1684 This reuse the file nodeid from parent, but we use special node
1697 This reuse the file nodeid from parent, but we use special node
1685 identifiers for added and modified files. This is used by manifests
1698 identifiers for added and modified files. This is used by manifests
1686 merge to see that files are different and by update logic to avoid
1699 merge to see that files are different and by update logic to avoid
1687 deleting newly added files.
1700 deleting newly added files.
1688 """
1701 """
1689 return self._buildstatusmanifest(self._status)
1702 return self._buildstatusmanifest(self._status)
1690
1703
1691 def _buildstatusmanifest(self, status):
1704 def _buildstatusmanifest(self, status):
1692 """Builds a manifest that includes the given status results."""
1705 """Builds a manifest that includes the given status results."""
1693 parents = self.parents()
1706 parents = self.parents()
1694
1707
1695 man = parents[0].manifest().copy()
1708 man = parents[0].manifest().copy()
1696
1709
1697 ff = self._flagfunc
1710 ff = self._flagfunc
1698 for i, l in ((addednodeid, status.added),
1711 for i, l in ((addednodeid, status.added),
1699 (modifiednodeid, status.modified)):
1712 (modifiednodeid, status.modified)):
1700 for f in l:
1713 for f in l:
1701 man[f] = i
1714 man[f] = i
1702 try:
1715 try:
1703 man.setflag(f, ff(f))
1716 man.setflag(f, ff(f))
1704 except OSError:
1717 except OSError:
1705 pass
1718 pass
1706
1719
1707 for f in status.deleted + status.removed:
1720 for f in status.deleted + status.removed:
1708 if f in man:
1721 if f in man:
1709 del man[f]
1722 del man[f]
1710
1723
1711 return man
1724 return man
1712
1725
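# Editorial note (not part of the original changeset): in the synthetic
# manifest built above, added and modified files do not point at real filelog
# nodes; they carry the sentinel ids imported from .node. Hypothetical paths:
#
#   >>> man['newly-added-file'] == addednodeid
#   True
#   >>> man['locally-modified-file'] == modifiednodeid
#   True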
1713 def _buildstatus(self, other, s, match, listignored, listclean,
1726 def _buildstatus(self, other, s, match, listignored, listclean,
1714 listunknown):
1727 listunknown):
1715 """build a status with respect to another context
1728 """build a status with respect to another context
1716
1729
1717 This includes logic for maintaining the fast path of status when
1730 This includes logic for maintaining the fast path of status when
1718 comparing the working directory against its parent, which is to skip
1731 comparing the working directory against its parent, which is to skip
1719 building a new manifest if self (working directory) is not comparing
1732 building a new manifest if self (working directory) is not comparing
1720 against its parent (repo['.']).
1733 against its parent (repo['.']).
1721 """
1734 """
1722 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1735 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1723 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1736 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1724 # might have accidentally ended up with the entire contents of the file
1737 # might have accidentally ended up with the entire contents of the file
1725 # they are supposed to be linking to.
1738 # they are supposed to be linking to.
1726 s.modified[:] = self._filtersuspectsymlink(s.modified)
1739 s.modified[:] = self._filtersuspectsymlink(s.modified)
1727 if other != self._repo['.']:
1740 if other != self._repo['.']:
1728 s = super(workingctx, self)._buildstatus(other, s, match,
1741 s = super(workingctx, self)._buildstatus(other, s, match,
1729 listignored, listclean,
1742 listignored, listclean,
1730 listunknown)
1743 listunknown)
1731 return s
1744 return s
1732
1745
1733 def _matchstatus(self, other, match):
1746 def _matchstatus(self, other, match):
1734 """override the match method with a filter for directory patterns
1747 """override the match method with a filter for directory patterns
1735
1748
1736 We use inheritance to customize the match.bad method only in cases of
1749 We use inheritance to customize the match.bad method only in cases of
1737 workingctx since it belongs only to the working directory when
1750 workingctx since it belongs only to the working directory when
1738 comparing against the parent changeset.
1751 comparing against the parent changeset.
1739
1752
1740 If we aren't comparing against the working directory's parent, then we
1753 If we aren't comparing against the working directory's parent, then we
1741 just use the default match object sent to us.
1754 just use the default match object sent to us.
1742 """
1755 """
1743 superself = super(workingctx, self)
1756 superself = super(workingctx, self)
1744 match = superself._matchstatus(other, match)
1757 match = superself._matchstatus(other, match)
1745 if other != self._repo['.']:
1758 if other != self._repo['.']:
1746 def bad(f, msg):
1759 def bad(f, msg):
1747 # 'f' may be a directory pattern from 'match.files()',
1760 # 'f' may be a directory pattern from 'match.files()',
1748 # so 'f not in ctx1' is not enough
1761 # so 'f not in ctx1' is not enough
1749 if f not in other and not other.hasdir(f):
1762 if f not in other and not other.hasdir(f):
1750 self._repo.ui.warn('%s: %s\n' %
1763 self._repo.ui.warn('%s: %s\n' %
1751 (self._repo.dirstate.pathto(f), msg))
1764 (self._repo.dirstate.pathto(f), msg))
1752 match.bad = bad
1765 match.bad = bad
1753 return match
1766 return match
1754
1767
1755 class committablefilectx(basefilectx):
1768 class committablefilectx(basefilectx):
1756 """A committablefilectx provides common functionality for a file context
1769 """A committablefilectx provides common functionality for a file context
1757 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1770 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1758 def __init__(self, repo, path, filelog=None, ctx=None):
1771 def __init__(self, repo, path, filelog=None, ctx=None):
1759 self._repo = repo
1772 self._repo = repo
1760 self._path = path
1773 self._path = path
1761 self._changeid = None
1774 self._changeid = None
1762 self._filerev = self._filenode = None
1775 self._filerev = self._filenode = None
1763
1776
1764 if filelog is not None:
1777 if filelog is not None:
1765 self._filelog = filelog
1778 self._filelog = filelog
1766 if ctx:
1779 if ctx:
1767 self._changectx = ctx
1780 self._changectx = ctx
1768
1781
1769 def __nonzero__(self):
1782 def __nonzero__(self):
1770 return True
1783 return True
1771
1784
1772 __bool__ = __nonzero__
1785 __bool__ = __nonzero__
1773
1786
1774 def linkrev(self):
1787 def linkrev(self):
1775 # linked to self._changectx no matter if file is modified or not
1788 # linked to self._changectx no matter if file is modified or not
1776 return self.rev()
1789 return self.rev()
1777
1790
1778 def parents(self):
1791 def parents(self):
1779 '''return parent filectxs, following copies if necessary'''
1792 '''return parent filectxs, following copies if necessary'''
1780 def filenode(ctx, path):
1793 def filenode(ctx, path):
1781 return ctx._manifest.get(path, nullid)
1794 return ctx._manifest.get(path, nullid)
1782
1795
1783 path = self._path
1796 path = self._path
1784 fl = self._filelog
1797 fl = self._filelog
1785 pcl = self._changectx._parents
1798 pcl = self._changectx._parents
1786 renamed = self.renamed()
1799 renamed = self.renamed()
1787
1800
1788 if renamed:
1801 if renamed:
1789 pl = [renamed + (None,)]
1802 pl = [renamed + (None,)]
1790 else:
1803 else:
1791 pl = [(path, filenode(pcl[0], path), fl)]
1804 pl = [(path, filenode(pcl[0], path), fl)]
1792
1805
1793 for pc in pcl[1:]:
1806 for pc in pcl[1:]:
1794 pl.append((path, filenode(pc, path), fl))
1807 pl.append((path, filenode(pc, path), fl))
1795
1808
1796 return [self._parentfilectx(p, fileid=n, filelog=l)
1809 return [self._parentfilectx(p, fileid=n, filelog=l)
1797 for p, n, l in pl if n != nullid]
1810 for p, n, l in pl if n != nullid]
1798
1811
1799 def children(self):
1812 def children(self):
1800 return []
1813 return []
1801
1814
1802 class workingfilectx(committablefilectx):
1815 class workingfilectx(committablefilectx):
1803 """A workingfilectx object makes access to data related to a particular
1816 """A workingfilectx object makes access to data related to a particular
1804 file in the working directory convenient."""
1817 file in the working directory convenient."""
1805 def __init__(self, repo, path, filelog=None, workingctx=None):
1818 def __init__(self, repo, path, filelog=None, workingctx=None):
1806 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1819 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1807
1820
1808 @propertycache
1821 @propertycache
1809 def _changectx(self):
1822 def _changectx(self):
1810 return workingctx(self._repo)
1823 return workingctx(self._repo)
1811
1824
1812 def data(self):
1825 def data(self):
1813 return self._repo.wread(self._path)
1826 return self._repo.wread(self._path)
1814 def renamed(self):
1827 def renamed(self):
1815 rp = self._repo.dirstate.copied(self._path)
1828 rp = self._repo.dirstate.copied(self._path)
1816 if not rp:
1829 if not rp:
1817 return None
1830 return None
1818 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1831 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1819
1832
1820 def size(self):
1833 def size(self):
1821 return self._repo.wvfs.lstat(self._path).st_size
1834 return self._repo.wvfs.lstat(self._path).st_size
1822 def date(self):
1835 def date(self):
1823 t, tz = self._changectx.date()
1836 t, tz = self._changectx.date()
1824 try:
1837 try:
1825 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1838 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1826 except OSError as err:
1839 except OSError as err:
1827 if err.errno != errno.ENOENT:
1840 if err.errno != errno.ENOENT:
1828 raise
1841 raise
1829 return (t, tz)
1842 return (t, tz)
1830
1843
1831 def cmp(self, fctx):
1844 def cmp(self, fctx):
1832 """compare with other file context
1845 """compare with other file context
1833
1846
1834 returns True if different than fctx.
1847 returns True if different than fctx.
1835 """
1848 """
1836 # fctx should be a filectx (not a workingfilectx)
1849 # fctx should be a filectx (not a workingfilectx)
1837 # invert comparison to reuse the same code path
1850 # invert comparison to reuse the same code path
1838 return fctx.cmp(self)
1851 return fctx.cmp(self)
1839
1852
1840 def remove(self, ignoremissing=False):
1853 def remove(self, ignoremissing=False):
1841 """wraps unlink for a repo's working directory"""
1854 """wraps unlink for a repo's working directory"""
1842 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1855 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1843
1856
1844 def write(self, data, flags):
1857 def write(self, data, flags):
1845 """wraps repo.wwrite"""
1858 """wraps repo.wwrite"""
1846 self._repo.wwrite(self._path, data, flags)
1859 self._repo.wwrite(self._path, data, flags)
1847
1860
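# Editor's note: the following sketch is not part of upstream context.py. It
# illustrates how a caller might go through workingfilectx to read and write a
# working-directory file instead of touching the filesystem directly. The
# helper name _example_appendline and the use of '' for flags are assumptions
# made for this illustration only.
def _example_appendline(repo, path, line):
    wctx = repo[None]                   # workingctx for the working directory
    fctx = wctx[path]                   # workingfilectx wrapping 'path'
    fctx.write(fctx.data() + line, '')  # wraps repo.wwrite(); '' = no flags
    return fctx.size()                  # size as reported by wvfs.lstat()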
1848 class workingcommitctx(workingctx):
1861 class workingcommitctx(workingctx):
1849 """A workingcommitctx object makes access to data related to
1862 """A workingcommitctx object makes access to data related to
1850 the revision being committed convenient.
1863 the revision being committed convenient.
1851
1864
1852 This hides changes in the working directory, if they aren't
1865 This hides changes in the working directory, if they aren't
1853 committed in this context.
1866 committed in this context.
1854 """
1867 """
1855 def __init__(self, repo, changes,
1868 def __init__(self, repo, changes,
1856 text="", user=None, date=None, extra=None):
1869 text="", user=None, date=None, extra=None):
1857 super(workingctx, self).__init__(repo, text, user, date, extra,
1870 super(workingctx, self).__init__(repo, text, user, date, extra,
1858 changes)
1871 changes)
1859
1872
1860 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1873 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1861 unknown=False):
1874 unknown=False):
1862 """Return matched files only in ``self._status``
1875 """Return matched files only in ``self._status``
1863
1876
1864 Uncommitted files appear "clean" via this context, even if
1877 Uncommitted files appear "clean" via this context, even if
1865 they aren't actually so in the working directory.
1878 they aren't actually so in the working directory.
1866 """
1879 """
1867 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1880 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1868 if clean:
1881 if clean:
1869 clean = [f for f in self._manifest if f not in self._changedset]
1882 clean = [f for f in self._manifest if f not in self._changedset]
1870 else:
1883 else:
1871 clean = []
1884 clean = []
1872 return scmutil.status([f for f in self._status.modified if match(f)],
1885 return scmutil.status([f for f in self._status.modified if match(f)],
1873 [f for f in self._status.added if match(f)],
1886 [f for f in self._status.added if match(f)],
1874 [f for f in self._status.removed if match(f)],
1887 [f for f in self._status.removed if match(f)],
1875 [], [], [], clean)
1888 [], [], [], clean)
1876
1889
1877 @propertycache
1890 @propertycache
1878 def _changedset(self):
1891 def _changedset(self):
1879 """Return the set of files changed in this context
1892 """Return the set of files changed in this context
1880 """
1893 """
1881 changed = set(self._status.modified)
1894 changed = set(self._status.modified)
1882 changed.update(self._status.added)
1895 changed.update(self._status.added)
1883 changed.update(self._status.removed)
1896 changed.update(self._status.removed)
1884 return changed
1897 return changed
1885
1898
1886 def makecachingfilectxfn(func):
1899 def makecachingfilectxfn(func):
1887 """Create a filectxfn that caches based on the path.
1900 """Create a filectxfn that caches based on the path.
1888
1901
1889 We can't use util.cachefunc because it uses all arguments as the cache
1902 We can't use util.cachefunc because it uses all arguments as the cache
1890 key and this creates a cycle since the arguments include the repo and
1903 key and this creates a cycle since the arguments include the repo and
1891 memctx.
1904 memctx.
1892 """
1905 """
1893 cache = {}
1906 cache = {}
1894
1907
1895 def getfilectx(repo, memctx, path):
1908 def getfilectx(repo, memctx, path):
1896 if path not in cache:
1909 if path not in cache:
1897 cache[path] = func(repo, memctx, path)
1910 cache[path] = func(repo, memctx, path)
1898 return cache[path]
1911 return cache[path]
1899
1912
1900 return getfilectx
1913 return getfilectx
1901
1914
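# Editor's note: illustrative sketch, not part of upstream context.py. It shows
# the kind of callable that makecachingfilectxfn() is meant to wrap, so that
# repeated requests for the same path during a commit reuse the cached
# memfilectx. The names _example_wrapfilectxfn and loadfromwdir are
# hypothetical.
def _example_wrapfilectxfn(repo):
    def loadfromwdir(repo, memctx, path):
        # build an in-memory file from the working directory copy
        return memfilectx(repo, path, repo.wread(path), memctx=memctx)
    # memctx performs this wrapping itself when given a callable filectxfn
    return makecachingfilectxfn(loadfromwdir)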
1902 class memctx(committablectx):
1915 class memctx(committablectx):
1903 """Use memctx to perform in-memory commits via localrepo.commitctx().
1916 """Use memctx to perform in-memory commits via localrepo.commitctx().
1904
1917
1905 Revision information is supplied at initialization time, while
1918 Revision information is supplied at initialization time, while
1906 related file data are made available through a callback
1919 related file data are made available through a callback
1907 mechanism. 'repo' is the current localrepo, 'parents' is a
1920 mechanism. 'repo' is the current localrepo, 'parents' is a
1908 sequence of two parent revision identifiers (pass None for every
1921 sequence of two parent revision identifiers (pass None for every
1909 missing parent), 'text' is the commit message and 'files' lists
1922 missing parent), 'text' is the commit message and 'files' lists
1910 names of files touched by the revision (normalized and relative to
1923 names of files touched by the revision (normalized and relative to
1911 repository root).
1924 repository root).
1912
1925
1913 filectxfn(repo, memctx, path) is a callable receiving the
1926 filectxfn(repo, memctx, path) is a callable receiving the
1914 repository, the current memctx object and the normalized path of
1927 repository, the current memctx object and the normalized path of
1915 requested file, relative to repository root. It is fired by the
1928 requested file, relative to repository root. It is fired by the
1916 commit function for every file in 'files', but the order of calls is
1929 commit function for every file in 'files', but the order of calls is
1917 undefined. If the file is available in the revision being
1930 undefined. If the file is available in the revision being
1918 committed (updated or added), filectxfn returns a memfilectx
1931 committed (updated or added), filectxfn returns a memfilectx
1919 object. If the file was removed, filectxfn returns None for recent
1932 object. If the file was removed, filectxfn returns None for recent
1920 Mercurial. Moved files are represented by marking the source file
1933 Mercurial. Moved files are represented by marking the source file
1921 removed and the new file added with copy information (see
1934 removed and the new file added with copy information (see
1922 memfilectx).
1935 memfilectx).
1923
1936
1924 user receives the committer name and defaults to current
1937 user receives the committer name and defaults to current
1925 repository username, date is the commit date in any format
1938 repository username, date is the commit date in any format
1926 supported by util.parsedate() and defaults to current date, extra
1939 supported by util.parsedate() and defaults to current date, extra
1927 is a dictionary of metadata or is left empty.
1940 is a dictionary of metadata or is left empty.
1928 """
1941 """
1929
1942
1930 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1943 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1931 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1944 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1932 # this field to determine what to do in filectxfn.
1945 # this field to determine what to do in filectxfn.
1933 _returnnoneformissingfiles = True
1946 _returnnoneformissingfiles = True
1934
1947
1935 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1948 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1936 date=None, extra=None, editor=False):
1949 date=None, extra=None, editor=False):
1937 super(memctx, self).__init__(repo, text, user, date, extra)
1950 super(memctx, self).__init__(repo, text, user, date, extra)
1938 self._rev = None
1951 self._rev = None
1939 self._node = None
1952 self._node = None
1940 parents = [(p or nullid) for p in parents]
1953 parents = [(p or nullid) for p in parents]
1941 p1, p2 = parents
1954 p1, p2 = parents
1942 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1955 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1943 files = sorted(set(files))
1956 files = sorted(set(files))
1944 self._files = files
1957 self._files = files
1945 self.substate = {}
1958 self.substate = {}
1946
1959
1947 # if store is not callable, wrap it in a function
1960 # if store is not callable, wrap it in a function
1948 if not callable(filectxfn):
1961 if not callable(filectxfn):
1949 def getfilectx(repo, memctx, path):
1962 def getfilectx(repo, memctx, path):
1950 fctx = filectxfn[path]
1963 fctx = filectxfn[path]
1951 # this is weird but apparently we only keep track of one parent
1964 # this is weird but apparently we only keep track of one parent
1952 # (why not only store that instead of a tuple?)
1965 # (why not only store that instead of a tuple?)
1953 copied = fctx.renamed()
1966 copied = fctx.renamed()
1954 if copied:
1967 if copied:
1955 copied = copied[0]
1968 copied = copied[0]
1956 return memfilectx(repo, path, fctx.data(),
1969 return memfilectx(repo, path, fctx.data(),
1957 islink=fctx.islink(), isexec=fctx.isexec(),
1970 islink=fctx.islink(), isexec=fctx.isexec(),
1958 copied=copied, memctx=memctx)
1971 copied=copied, memctx=memctx)
1959 self._filectxfn = getfilectx
1972 self._filectxfn = getfilectx
1960 else:
1973 else:
1961 # memoizing increases performance for e.g. vcs convert scenarios.
1974 # memoizing increases performance for e.g. vcs convert scenarios.
1962 self._filectxfn = makecachingfilectxfn(filectxfn)
1975 self._filectxfn = makecachingfilectxfn(filectxfn)
1963
1976
1964 if extra:
1977 if extra:
1965 self._extra = extra.copy()
1978 self._extra = extra.copy()
1966 else:
1979 else:
1967 self._extra = {}
1980 self._extra = {}
1968
1981
1969 if self._extra.get('branch', '') == '':
1982 if self._extra.get('branch', '') == '':
1970 self._extra['branch'] = 'default'
1983 self._extra['branch'] = 'default'
1971
1984
1972 if editor:
1985 if editor:
1973 self._text = editor(self._repo, self, [])
1986 self._text = editor(self._repo, self, [])
1974 self._repo.savecommitmessage(self._text)
1987 self._repo.savecommitmessage(self._text)
1975
1988
1976 def filectx(self, path, filelog=None):
1989 def filectx(self, path, filelog=None):
1977 """get a file context from the working directory
1990 """get a file context from the working directory
1978
1991
1979 Returns None if file doesn't exist and should be removed."""
1992 Returns None if file doesn't exist and should be removed."""
1980 return self._filectxfn(self._repo, self, path)
1993 return self._filectxfn(self._repo, self, path)
1981
1994
1982 def commit(self):
1995 def commit(self):
1983 """commit context to the repo"""
1996 """commit context to the repo"""
1984 return self._repo.commitctx(self)
1997 return self._repo.commitctx(self)
1985
1998
1986 @propertycache
1999 @propertycache
1987 def _manifest(self):
2000 def _manifest(self):
1988 """generate a manifest based on the return values of filectxfn"""
2001 """generate a manifest based on the return values of filectxfn"""
1989
2002
1990 # keep this simple for now; just worry about p1
2003 # keep this simple for now; just worry about p1
1991 pctx = self._parents[0]
2004 pctx = self._parents[0]
1992 man = pctx.manifest().copy()
2005 man = pctx.manifest().copy()
1993
2006
1994 for f in self._status.modified:
2007 for f in self._status.modified:
1995 p1node = nullid
2008 p1node = nullid
1996 p2node = nullid
2009 p2node = nullid
1997 p = pctx[f].parents() # if file isn't in pctx, check p2?
2010 p = pctx[f].parents() # if file isn't in pctx, check p2?
1998 if len(p) > 0:
2011 if len(p) > 0:
1999 p1node = p[0].filenode()
2012 p1node = p[0].filenode()
2000 if len(p) > 1:
2013 if len(p) > 1:
2001 p2node = p[1].filenode()
2014 p2node = p[1].filenode()
2002 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2015 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2003
2016
2004 for f in self._status.added:
2017 for f in self._status.added:
2005 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2018 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2006
2019
2007 for f in self._status.removed:
2020 for f in self._status.removed:
2008 if f in man:
2021 if f in man:
2009 del man[f]
2022 del man[f]
2010
2023
2011 return man
2024 return man
2012
2025
2013 @propertycache
2026 @propertycache
2014 def _status(self):
2027 def _status(self):
2015 """Calculate exact status from ``files`` specified at construction
2028 """Calculate exact status from ``files`` specified at construction
2016 """
2029 """
2017 man1 = self.p1().manifest()
2030 man1 = self.p1().manifest()
2018 p2 = self._parents[1]
2031 p2 = self._parents[1]
2019 # "1 < len(self._parents)" can't be used for checking
2032 # "1 < len(self._parents)" can't be used for checking
2020 # existence of the 2nd parent, because "memctx._parents" is
2033 # existence of the 2nd parent, because "memctx._parents" is
2021 # explicitly initialized by the list, of which length is 2.
2034 # explicitly initialized by the list, of which length is 2.
2022 if p2.node() != nullid:
2035 if p2.node() != nullid:
2023 man2 = p2.manifest()
2036 man2 = p2.manifest()
2024 managing = lambda f: f in man1 or f in man2
2037 managing = lambda f: f in man1 or f in man2
2025 else:
2038 else:
2026 managing = lambda f: f in man1
2039 managing = lambda f: f in man1
2027
2040
2028 modified, added, removed = [], [], []
2041 modified, added, removed = [], [], []
2029 for f in self._files:
2042 for f in self._files:
2030 if not managing(f):
2043 if not managing(f):
2031 added.append(f)
2044 added.append(f)
2032 elif self[f]:
2045 elif self[f]:
2033 modified.append(f)
2046 modified.append(f)
2034 else:
2047 else:
2035 removed.append(f)
2048 removed.append(f)
2036
2049
2037 return scmutil.status(modified, added, removed, [], [], [], [])
2050 return scmutil.status(modified, added, removed, [], [], [], [])
2038
2051
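# Editor's note: illustrative sketch, not part of upstream context.py, showing
# one way the memctx/memfilectx API described in the docstring above can be
# driven to create a single-file in-memory commit. _example_memcommit and the
# literal arguments are assumptions for the example; only the memctx and
# memfilectx signatures come from this module.
def _example_memcommit(repo, path, data, text, user=None):
    def filectxfn(repo, memctx, fpath):
        if fpath != path:
            return None        # None means "removed" for recent Mercurial
        return memfilectx(repo, fpath, data, memctx=memctx)
    parents = (repo['.'].node(), None)   # None = missing second parent
    mctx = memctx(repo, parents, text, [path], filectxfn, user=user)
    return mctx.commit()       # hands the context to localrepo.commitctx()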
2039 class memfilectx(committablefilectx):
2052 class memfilectx(committablefilectx):
2040 """memfilectx represents an in-memory file to commit.
2053 """memfilectx represents an in-memory file to commit.
2041
2054
2042 See memctx and committablefilectx for more details.
2055 See memctx and committablefilectx for more details.
2043 """
2056 """
2044 def __init__(self, repo, path, data, islink=False,
2057 def __init__(self, repo, path, data, islink=False,
2045 isexec=False, copied=None, memctx=None):
2058 isexec=False, copied=None, memctx=None):
2046 """
2059 """
2047 path is the normalized file path relative to repository root.
2060 path is the normalized file path relative to repository root.
2048 data is the file content as a string.
2061 data is the file content as a string.
2049 islink is True if the file is a symbolic link.
2062 islink is True if the file is a symbolic link.
2050 isexec is True if the file is executable.
2063 isexec is True if the file is executable.
2051 copied is the source file path if current file was copied in the
2064 copied is the source file path if current file was copied in the
2052 revision being committed, or None."""
2065 revision being committed, or None."""
2053 super(memfilectx, self).__init__(repo, path, None, memctx)
2066 super(memfilectx, self).__init__(repo, path, None, memctx)
2054 self._data = data
2067 self._data = data
2055 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2068 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2056 self._copied = None
2069 self._copied = None
2057 if copied:
2070 if copied:
2058 self._copied = (copied, nullid)
2071 self._copied = (copied, nullid)
2059
2072
2060 def data(self):
2073 def data(self):
2061 return self._data
2074 return self._data
2062 def size(self):
2075 def size(self):
2063 return len(self.data())
2076 return len(self.data())
2064 def flags(self):
2077 def flags(self):
2065 return self._flags
2078 return self._flags
2066 def renamed(self):
2079 def renamed(self):
2067 return self._copied
2080 return self._copied
2068
2081
2069 def remove(self, ignoremissing=False):
2082 def remove(self, ignoremissing=False):
2070 """wraps unlink for a repo's working directory"""
2083 """wraps unlink for a repo's working directory"""
2071 # need to figure out what to do here
2084 # need to figure out what to do here
2072 del self._changectx[self._path]
2085 del self._changectx[self._path]
2073
2086
2074 def write(self, data, flags):
2087 def write(self, data, flags):
2075 """wraps repo.wwrite"""
2088 """wraps repo.wwrite"""
2076 self._data = data
2089 self._data = data
2077
2090
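# Editor's note: illustrative sketch, not part of upstream context.py. Per the
# memctx docstring above, a rename is expressed by listing both paths in
# 'files', returning None for the removed source and a memfilectx carrying copy
# information for the destination. _example_renamefilectxfn, 'old.txt' and
# 'new.txt' are made-up names; the callable would be passed to memctx together
# with files=['old.txt', 'new.txt'].
def _example_renamefilectxfn(data):
    def filectxfn(repo, memctx, path):
        if path == 'old.txt':
            return None        # source of the rename: reported as removed
        return memfilectx(repo, path, data, copied='old.txt', memctx=memctx)
    return filectxfn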
2078 class metadataonlyctx(committablectx):
2091 class metadataonlyctx(committablectx):
2079 """Like memctx but it's reusing the manifest of different commit.
2092 """Like memctx but it's reusing the manifest of different commit.
2080 Intended to be used by lightweight operations that are creating
2093 Intended to be used by lightweight operations that are creating
2081 metadata-only changes.
2094 metadata-only changes.
2082
2095
2083 Revision information is supplied at initialization time. 'repo' is the
2096 Revision information is supplied at initialization time. 'repo' is the
2084 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2097 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2085 'parents' is a sequence of two parent revision identifiers (pass None for
2098 'parents' is a sequence of two parent revision identifiers (pass None for
2086 every missing parent), 'text' is the commit message.
2099 every missing parent), 'text' is the commit message.
2087
2100
2088 user receives the committer name and defaults to current repository
2101 user receives the committer name and defaults to current repository
2089 username, date is the commit date in any format supported by
2102 username, date is the commit date in any format supported by
2090 util.parsedate() and defaults to current date, extra is a dictionary of
2103 util.parsedate() and defaults to current date, extra is a dictionary of
2091 metadata or is left empty.
2104 metadata or is left empty.
2092 """
2105 """
2093 def __new__(cls, repo, originalctx, *args, **kwargs):
2106 def __new__(cls, repo, originalctx, *args, **kwargs):
2094 return super(metadataonlyctx, cls).__new__(cls, repo)
2107 return super(metadataonlyctx, cls).__new__(cls, repo)
2095
2108
2096 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2109 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2097 extra=None, editor=False):
2110 extra=None, editor=False):
2098 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2111 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2099 self._rev = None
2112 self._rev = None
2100 self._node = None
2113 self._node = None
2101 self._originalctx = originalctx
2114 self._originalctx = originalctx
2102 self._manifestnode = originalctx.manifestnode()
2115 self._manifestnode = originalctx.manifestnode()
2103 parents = [(p or nullid) for p in parents]
2116 parents = [(p or nullid) for p in parents]
2104 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2117 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2105
2118
2106 # sanity check to ensure that the reused manifest parents are
2119 # sanity check to ensure that the reused manifest parents are
2107 # manifests of our commit parents
2120 # manifests of our commit parents
2108 mp1, mp2 = self.manifestctx().parents
2121 mp1, mp2 = self.manifestctx().parents
2109 if p1 != nullid and p1.manifestnode() != mp1:
2122 if p1 != nullid and p1.manifestnode() != mp1:
2110 raise RuntimeError('can\'t reuse the manifest: '
2123 raise RuntimeError('can\'t reuse the manifest: '
2111 'its p1 doesn\'t match the new ctx p1')
2124 'its p1 doesn\'t match the new ctx p1')
2112 if p2 != nullid and p2.manifestnode() != mp2:
2125 if p2 != nullid and p2.manifestnode() != mp2:
2113 raise RuntimeError('can\'t reuse the manifest: '
2126 raise RuntimeError('can\'t reuse the manifest: '
2114 'its p2 doesn\'t match the new ctx p2')
2127 'its p2 doesn\'t match the new ctx p2')
2115
2128
2116 self._files = originalctx.files()
2129 self._files = originalctx.files()
2117 self.substate = {}
2130 self.substate = {}
2118
2131
2119 if extra:
2132 if extra:
2120 self._extra = extra.copy()
2133 self._extra = extra.copy()
2121 else:
2134 else:
2122 self._extra = {}
2135 self._extra = {}
2123
2136
2124 if self._extra.get('branch', '') == '':
2137 if self._extra.get('branch', '') == '':
2125 self._extra['branch'] = 'default'
2138 self._extra['branch'] = 'default'
2126
2139
2127 if editor:
2140 if editor:
2128 self._text = editor(self._repo, self, [])
2141 self._text = editor(self._repo, self, [])
2129 self._repo.savecommitmessage(self._text)
2142 self._repo.savecommitmessage(self._text)
2130
2143
2131 def manifestnode(self):
2144 def manifestnode(self):
2132 return self._manifestnode
2145 return self._manifestnode
2133
2146
2134 @propertycache
2147 @propertycache
2135 def _manifestctx(self):
2148 def _manifestctx(self):
2136 return self._repo.manifestlog[self._manifestnode]
2149 return self._repo.manifestlog[self._manifestnode]
2137
2150
2138 def filectx(self, path, filelog=None):
2151 def filectx(self, path, filelog=None):
2139 return self._originalctx.filectx(path, filelog=filelog)
2152 return self._originalctx.filectx(path, filelog=filelog)
2140
2153
2141 def commit(self):
2154 def commit(self):
2142 """commit context to the repo"""
2155 """commit context to the repo"""
2143 return self._repo.commitctx(self)
2156 return self._repo.commitctx(self)
2144
2157
2145 @property
2158 @property
2146 def _manifest(self):
2159 def _manifest(self):
2147 return self._originalctx.manifest()
2160 return self._originalctx.manifest()
2148
2161
2149 @propertycache
2162 @propertycache
2150 def _status(self):
2163 def _status(self):
2151 """Calculate exact status from ``files`` specified in the ``origctx``
2164 """Calculate exact status from ``files`` specified in the ``origctx``
2152 and parents manifests.
2165 and parents manifests.
2153 """
2166 """
2154 man1 = self.p1().manifest()
2167 man1 = self.p1().manifest()
2155 p2 = self._parents[1]
2168 p2 = self._parents[1]
2156 # "1 < len(self._parents)" can't be used for checking
2169 # "1 < len(self._parents)" can't be used for checking
2157 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2170 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2158 # explicitly initialized by the list, of which length is 2.
2171 # explicitly initialized by the list, of which length is 2.
2159 if p2.node() != nullid:
2172 if p2.node() != nullid:
2160 man2 = p2.manifest()
2173 man2 = p2.manifest()
2161 managing = lambda f: f in man1 or f in man2
2174 managing = lambda f: f in man1 or f in man2
2162 else:
2175 else:
2163 managing = lambda f: f in man1
2176 managing = lambda f: f in man1
2164
2177
2165 modified, added, removed = [], [], []
2178 modified, added, removed = [], [], []
2166 for f in self._files:
2179 for f in self._files:
2167 if not managing(f):
2180 if not managing(f):
2168 added.append(f)
2181 added.append(f)
2169 elif self[f]:
2182 elif self[f]:
2170 modified.append(f)
2183 modified.append(f)
2171 else:
2184 else:
2172 removed.append(f)
2185 removed.append(f)
2173
2186
2174 return scmutil.status(modified, added, removed, [], [], [], [])
2187 return scmutil.status(modified, added, removed, [], [], [], [])
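# Editor's note: illustrative sketch, not part of upstream context.py, showing
# the kind of metadata-only rewrite metadataonlyctx is designed for: the
# original changeset's manifest is reused and only the commit message changes.
# The helper name _example_reword is hypothetical, and the sketch assumes the
# caller handles stripping or obsoleting the old changeset separately.
def _example_reword(repo, rev, newtext):
    octx = repo[rev]
    mctx = metadataonlyctx(repo, octx,
                           parents=(octx.p1().node(), octx.p2().node()),
                           text=newtext, user=octx.user(), date=octx.date(),
                           extra=octx.extra())
    return mctx.commit()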
@@ -1,33 +1,93
1 $ hg init
1 $ hg init repo
2 $ cd repo
2 $ echo a > a
3 $ echo a > a
3 $ hg add a
4 $ hg add a
4 $ hg commit -m test
5 $ hg commit -m test
5
6
6 Do we ever miss a sub-second change?:
7 Do we ever miss a sub-second change?:
7
8
8 $ for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20; do
9 $ for i in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20; do
9 > hg co -qC 0
10 > hg co -qC 0
10 > echo b > a
11 > echo b > a
11 > hg st
12 > hg st
12 > done
13 > done
13 M a
14 M a
14 M a
15 M a
15 M a
16 M a
16 M a
17 M a
17 M a
18 M a
18 M a
19 M a
19 M a
20 M a
20 M a
21 M a
21 M a
22 M a
22 M a
23 M a
23 M a
24 M a
24 M a
25 M a
25 M a
26 M a
26 M a
27 M a
27 M a
28 M a
28 M a
29 M a
29 M a
30 M a
30 M a
31 M a
31 M a
32 M a
32 M a
33 M a
33
34
35 $ echo test > b
36 $ mkdir dir1
37 $ echo test > dir1/c
38 $ echo test > d
39
40 $ echo test > e
41 #if execbit
42 A directory will typically have the execute bit -- make sure it doesn't get
43 confused with a file with the exec bit set
44 $ chmod +x e
45 #endif
46
47 $ hg add b dir1 d e
48 adding dir1/c
49 $ hg commit -m test2
50
51 $ cat >> $TESTTMP/dirstaterace.py << EOF
52 > from mercurial import (
53 > context,
54 > extensions,
55 > )
56 > def extsetup():
57 > extensions.wrapfunction(context.workingctx, '_checklookup', overridechecklookup)
58 > def overridechecklookup(orig, self, files):
59 > # make an update that changes the dirstate from underneath
60 > self._repo.ui.system(self._repo.ui.config('dirstaterace', 'command'), cwd=self._repo.root)
61 > return orig(self, files)
62 > EOF
63
64 $ hg debugrebuilddirstate
65 $ hg debugdirstate
66 n 0 -1 unset a
67 n 0 -1 unset b
68 n 0 -1 unset d
69 n 0 -1 unset dir1/c
70 n 0 -1 unset e
71
72 XXX Note that this returns M for files that got replaced by directories. This is
73 definitely a bug, but the fix for that is hard and the next status run is fine
74 anyway.
75
76 $ hg status --config extensions.dirstaterace=$TESTTMP/dirstaterace.py \
77 > --config dirstaterace.command='rm b && rm -r dir1 && rm d && mkdir d && rm e && mkdir e'
78 M d
79 M e
80 ! b
81 ! dir1/c
82 $ hg debugdirstate
83 n 644 2 * a (glob)
84 n 0 -1 unset b
85 n 0 -1 unset d
86 n 0 -1 unset dir1/c
87 n 0 -1 unset e
88
89 $ hg status
90 ! b
91 ! d
92 ! dir1/c
93 ! e