##// END OF EJS Templates
context: extract _changesinrange() out of blockancestors()...
Denis Laxalde -
r30824:6e1d54be default
parent child Browse files
Show More
@@ -1,2114 +1,2114 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 bin,
18 bin,
19 hex,
19 hex,
20 modifiednodeid,
20 modifiednodeid,
21 newnodeid,
21 newnodeid,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 wdirid,
25 wdirid,
26 )
26 )
27 from . import (
27 from . import (
28 encoding,
28 encoding,
29 error,
29 error,
30 fileset,
30 fileset,
31 match as matchmod,
31 match as matchmod,
32 mdiff,
32 mdiff,
33 obsolete as obsmod,
33 obsolete as obsmod,
34 patch,
34 patch,
35 phases,
35 phases,
36 repoview,
36 repoview,
37 revlog,
37 revlog,
38 scmutil,
38 scmutil,
39 subrepo,
39 subrepo,
40 util,
40 util,
41 )
41 )
42
42
# shorthand used throughout this module for lazily-computed,
# cached-on-first-access attributes
propertycache = util.propertycache

# matches any byte outside the printable ASCII range; used by changectx to
# detect a binary node id passed where a revision string was expected
nonascii = re.compile(r'[^\x21-\x7f]').search
46
46
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # contexts are immutable, so an existing context passed as the
        # changeid is simply returned as-is
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # equal only when the concrete type matches too: a workingctx is
        # never equal to a changectx even at the same revision
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # membership means "file tracked in this revision's manifest"
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _manifestmatches(self, match, s):
        """generate a new manifest filtered by the match argument

        This method is for internal use only and mainly exists to provide an
        object oriented way for other contexts to customize the manifest
        generation.
        """
        return self.manifest().matches(match)

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        if self.rev() is not None and self.rev() < other.rev():
            self.manifest()
        mf1 = other._manifestmatches(match, s)
        mf2 = self._manifestmatches(match, s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                # reported by diff only when listclean is set
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 != newnodeid:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1]
            ignored = [fn for fn in ignored if fn not in mf1]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        # parsed .hgsubstate information for this context
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        """Return the revision recorded for *subpath* in substate."""
        return self.substate[subpath][1]

    # -- trivial accessors ------------------------------------------------
    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        # anything above the public phase may still be rewritten
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changesets.

        Troubles are returned as strings. possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # a missing second parent is surfaced as the null changeset
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        """Return (filenode, flags) for *path*, using whichever manifest
        representation is already cached before forcing a full read.

        Raises ManifestLookupError if the file is not in this revision.
        """
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # missing files have no flags rather than raising
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """Return a matcher for this context's repo.

        Note: the default for ``pats`` used to be a mutable ``[]``, which is
        shared between calls; a ``None`` sentinel avoids that pitfall while
        keeping the same behavior for callers that rely on the default.
        """
        if pats is None:
            pats = []
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def dirty(self, missing=False, merge=True, branch=True):
        # read-only contexts are never dirty; workingctx overrides this
        return False

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        #
        # (local renamed from 'reversed' to avoid shadowing the builtin)
        swapped = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            swapped = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if swapped:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
381
381
382
382
def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None, extra=None):
    """Build a memctx whose file contents are served from *store*.

    *store* must expose ``getfile(path)`` returning ``(data, mode, copied)``;
    a ``data`` of None marks the file as removed.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = store.getfile(path)
        if data is None:
            # removed file: signal absence to memctx
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink,
                          isexec=isexec, copied=copied, memctx=memctx)

    if extra is None:
        extra = {}
    if branch:
        extra['branch'] = encoding.fromlocal(branch)
    return memctx(repo, parents, text, files, getfilectx, user,
                  date, extra, editor)
399
399
400 class changectx(basectx):
400 class changectx(basectx):
401 """A changecontext object makes access to data related to a particular
401 """A changecontext object makes access to data related to a particular
402 changeset convenient. It represents a read-only context already present in
402 changeset convenient. It represents a read-only context already present in
403 the repo."""
403 the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag

        Resolution is attempted in order: int rev, 'null', 'tip',
        '.' / first dirstate parent, binary node, decimal rev string,
        40-char hex node, registered names (bookmarks/tags/branches),
        then unambiguous hex prefix. Raises RepoLookupError (or a
        Filtered* variant for hidden/filtered revisions) on failure.
        """

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if isinstance(changeid, long):
                # normalize a long to its string form and fall through to
                # the string-based lookups below (Python 2 only)
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                # looks like a binary node id
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                # decimal revision number, possibly negative (from tip)
                r = int(changeid)
                if str(r) != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                # full 40-character hex node id
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            # last resort: treat changeid as an unambiguous hex prefix
            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # a binary node id would garble the error message below;
                # convert it to hex first
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            # the revision exists but is filtered out of this repo view;
            # give a friendlier message for the common 'hidden' case
            if repo.filtername.startswith('visible'):
                msg = _("hidden revision '%s'") % changeid
                hint = _('use --hidden to access hidden revisions')
                raise error.FilteredRepoLookupError(msg, hint=hint)
            msg = _("filtered revision '%s' (not in '%s' subset)")
            msg %= (changeid, repo.filtername)
            raise error.FilteredRepoLookupError(msg)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)
517
517
    def __hash__(self):
        # hash by revision number; fall back to object identity when _rev
        # was never assigned (e.g. construction failed partway through)
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)
523
523
    def __nonzero__(self):
        # only the null revision is falsy
        return self._rev != nullrev
526
526
    @propertycache
    def _changeset(self):
        # lazily-parsed changelog entry for this revision
        return self._repo.changelog.changelogrevision(self.rev())
530
530
    @propertycache
    def _manifest(self):
        # full manifest, read through the cached manifest context
        return self._manifestctx.read()
534
534
    @propertycache
    def _manifestctx(self):
        # manifest context addressed by the manifest node recorded in the
        # changelog entry
        return self._repo.manifestlog[self._changeset.manifest]
538
538
    @propertycache
    def _manifestdelta(self):
        # delta-form manifest; avoids a full manifest read for single-file
        # lookups (see basectx._fileinfo)
        return self._manifestctx.readdelta()
542
542
543 @propertycache
543 @propertycache
544 def _parents(self):
544 def _parents(self):
545 repo = self._repo
545 repo = self._repo
546 p1, p2 = repo.changelog.parentrevs(self._rev)
546 p1, p2 = repo.changelog.parentrevs(self._rev)
547 if p2 == nullrev:
547 if p2 == nullrev:
548 return [changectx(repo, p1)]
548 return [changectx(repo, p1)]
549 return [changectx(repo, p1), changectx(repo, p2)]
549 return [changectx(repo, p1), changectx(repo, p2)]
550
550
551 def changeset(self):
551 def changeset(self):
552 c = self._changeset
552 c = self._changeset
553 return (
553 return (
554 c.manifest,
554 c.manifest,
555 c.user,
555 c.user,
556 c.date,
556 c.date,
557 c.files,
557 c.files,
558 c.description,
558 c.description,
559 c.extra,
559 c.extra,
560 )
560 )
561 def manifestnode(self):
561 def manifestnode(self):
562 return self._changeset.manifest
562 return self._changeset.manifest
563
563
    def user(self):
        """Committer recorded in the changelog entry."""
        return self._changeset.user
    def date(self):
        """Commit date as stored in the changelog entry."""
        return self._changeset.date
    def files(self):
        """Files touched by this changeset."""
        return self._changeset.files
    def description(self):
        """Commit message."""
        return self._changeset.description
    def branch(self):
        """Branch name, converted to the local encoding."""
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        """True if this changeset closes its branch."""
        return 'close' in self._changeset.extra
    def extra(self):
        """The 'extra' dict of the changelog entry."""
        return self._changeset.extra
    def tags(self):
        """Tags pointing at this changeset's node."""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Bookmarks pointing at this changeset's node."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        """Phase of this revision (via the repo's phase cache)."""
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        """True if this revision is filtered out of the 'visible' view."""
        return self._rev in repoview.filterrevs(self._repo, 'visible')
586
586
587 def children(self):
587 def children(self):
588 """return contexts for each child changeset"""
588 """return contexts for each child changeset"""
589 c = self._repo.changelog.children(self._node)
589 c = self._repo.changelog.children(self._node)
590 return [changectx(self._repo, x) for x in c]
590 return [changectx(self._repo, x) for x in c]
591
591
592 def ancestors(self):
592 def ancestors(self):
593 for a in self._repo.changelog.ancestors([self._rev]):
593 for a in self._repo.changelog.ancestors([self._rev]):
594 yield changectx(self._repo, a)
594 yield changectx(self._repo, a)
595
595
596 def descendants(self):
596 def descendants(self):
597 for d in self._repo.changelog.descendants([self._rev]):
597 for d in self._repo.changelog.descendants([self._rev]):
598 yield changectx(self._repo, d)
598 yield changectx(self._repo, d)
599
599
600 def filectx(self, path, fileid=None, filelog=None):
600 def filectx(self, path, fileid=None, filelog=None):
601 """get a file context from this changeset"""
601 """get a file context from this changeset"""
602 if fileid is None:
602 if fileid is None:
603 fileid = self.filenode(path)
603 fileid = self.filenode(path)
604 return filectx(self._repo, path, fileid=fileid,
604 return filectx(self._repo, path, fileid=fileid,
605 changectx=self, filelog=filelog)
605 changectx=self, filelog=filelog)
606
606
    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            # workingctx has no node; use its first parent instead
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # several candidate heads: let configuration pick one
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    # ignore unknown revisions listed in the config
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched; use the revlog's choice
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)
642
642
643 def descendant(self, other):
643 def descendant(self, other):
644 """True if other is descendant of this changeset"""
644 """True if other is descendant of this changeset"""
645 return self._repo.changelog.descendant(self._rev, other._rev)
645 return self._repo.changelog.descendant(self._rev, other._rev)
646
646
647 def walk(self, match):
647 def walk(self, match):
648 '''Generates matching file names.'''
648 '''Generates matching file names.'''
649
649
650 # Wrap match.bad method to have message with nodeid
650 # Wrap match.bad method to have message with nodeid
651 def bad(fn, msg):
651 def bad(fn, msg):
652 # The manifest doesn't know about subrepos, so don't complain about
652 # The manifest doesn't know about subrepos, so don't complain about
653 # paths into valid subrepos.
653 # paths into valid subrepos.
654 if any(fn == s or fn.startswith(s + '/')
654 if any(fn == s or fn.startswith(s + '/')
655 for s in self.substate):
655 for s in self.substate):
656 return
656 return
657 match.bad(fn, _('no such file in rev %s') % self)
657 match.bad(fn, _('no such file in rev %s') % self)
658
658
659 m = matchmod.badmatch(match, bad)
659 m = matchmod.badmatch(match, bad)
660 return self._manifest.walk(m)
660 return self._manifest.walk(m)
661
661
662 def matches(self, match):
662 def matches(self, match):
663 return self.walk(match)
663 return self.walk(match)
664
664
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory."""
    def __new__(cls, repo, path, *args, **kwargs):
        return super(basefilectx, cls).__new__(cls)

    @propertycache
    def _filelog(self):
        # filelog for this file's path, opened lazily
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # changeset revision associated with this file revision, resolved
        # lazily from whatever information the constructor provided
        if '_changeid' in self.__dict__:
            return self._changeid
        elif '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            # last resort: trust the (possibly shadowed) stored linkrev
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            # fileid may be a file revision or node; lookup() handles both
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path
706
706
707 def __nonzero__(self):
707 def __nonzero__(self):
708 try:
708 try:
709 self._filenode
709 self._filenode
710 return True
710 return True
711 except error.LookupError:
711 except error.LookupError:
712 # file is missing
712 # file is missing
713 return False
713 return False
714
714
715 def __str__(self):
715 def __str__(self):
716 try:
716 try:
717 return "%s@%s" % (self.path(), self._changectx)
717 return "%s@%s" % (self.path(), self._changectx)
718 except error.LookupError:
718 except error.LookupError:
719 return "%s@???" % self.path()
719 return "%s@???" % self.path()
720
720
721 def __repr__(self):
721 def __repr__(self):
722 return "<%s %s>" % (type(self).__name__, str(self))
722 return "<%s %s>" % (type(self).__name__, str(self))
723
723
724 def __hash__(self):
724 def __hash__(self):
725 try:
725 try:
726 return hash((self._path, self._filenode))
726 return hash((self._path, self._filenode))
727 except AttributeError:
727 except AttributeError:
728 return id(self)
728 return id(self)
729
729
730 def __eq__(self, other):
730 def __eq__(self, other):
731 try:
731 try:
732 return (type(self) == type(other) and self._path == other._path
732 return (type(self) == type(other) and self._path == other._path
733 and self._filenode == other._filenode)
733 and self._filenode == other._filenode)
734 except AttributeError:
734 except AttributeError:
735 return False
735 return False
736
736
737 def __ne__(self, other):
737 def __ne__(self, other):
738 return not (self == other)
738 return not (self == other)
739
739
    def filerev(self):
        """Filelog revision number of this file revision."""
        return self._filerev
    def filenode(self):
        """Filelog node of this file revision."""
        return self._filenode
    def flags(self):
        """Flags ('x', 'l', ...) of this file in its changeset."""
        return self._changectx.flags(self._path)
    def filelog(self):
        return self._filelog
    def rev(self):
        """Changelog revision this filectx is associated with."""
        return self._changeid
    def linkrev(self):
        # raw linkrev stored in the filelog; may be shadowed when several
        # changesets introduce the same file revision (see introrev)
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        """The changectx this file context belongs to."""
        return self._changectx
    def repo(self):
        return self._repo

    def path(self):
        """Repository-relative path of this file."""
        return self._path
781
781
782 def isbinary(self):
782 def isbinary(self):
783 try:
783 try:
784 return util.binary(self.data())
784 return util.binary(self.data())
785 except IOError:
785 except IOError:
786 return False
786 return False
787 def isexec(self):
787 def isexec(self):
788 return 'x' in self.flags()
788 return 'x' in self.flags()
789 def islink(self):
789 def islink(self):
790 return 'l' in self.flags()
790 return 'l' in self.flags()
791
791
792 def isabsent(self):
792 def isabsent(self):
793 """whether this filectx represents a file not in self._changectx
793 """whether this filectx represents a file not in self._changectx
794
794
795 This is mainly for merge code to detect change/delete conflicts. This is
795 This is mainly for merge code to detect change/delete conflicts. This is
796 expected to be True for all subclasses of basectx."""
796 expected to be True for all subclasses of basectx."""
797 return False
797 return False
798
798
    # subclasses with their own comparison semantics set this to True so
    # that cmp() defers to them
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            # let the other side run its specialized comparison
            return fctx.cmp(self)

        # Only pay for a full content comparison when the sizes could
        # plausibly match; otherwise the files must differ.
        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True
817
817
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        # use the unfiltered changelog so hidden ancestors are reachable
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            # one lazy ancestor set serves both the membership test and,
            # if needed, the iteration below
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr
863
863
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            # no changeset association, or the linkrev already points at
            # the associated changeset: the stored linkrev is trustworthy
            return self.linkrev()
        # otherwise walk down from our changeset to the true introduction
        return self._adjustlinkrev(self.rev(), inclusive=True)
879
879
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
895
895
896 def parents(self):
896 def parents(self):
897 _path = self._path
897 _path = self._path
898 fl = self._filelog
898 fl = self._filelog
899 parents = self._filelog.parents(self._filenode)
899 parents = self._filelog.parents(self._filenode)
900 pl = [(_path, node, fl) for node in parents if node != nullid]
900 pl = [(_path, node, fl) for node in parents if node != nullid]
901
901
902 r = fl.renamed(self._filenode)
902 r = fl.renamed(self._filenode)
903 if r:
903 if r:
904 # - In the simple rename case, both parent are nullid, pl is empty.
904 # - In the simple rename case, both parent are nullid, pl is empty.
905 # - In case of merge, only one of the parent is null id and should
905 # - In case of merge, only one of the parent is null id and should
906 # be replaced with the rename information. This parent is -always-
906 # be replaced with the rename information. This parent is -always-
907 # the first one.
907 # the first one.
908 #
908 #
909 # As null id have always been filtered out in the previous list
909 # As null id have always been filtered out in the previous list
910 # comprehension, inserting to 0 will always result in "replacing
910 # comprehension, inserting to 0 will always result in "replacing
911 # first nullid parent with rename information.
911 # first nullid parent with rename information.
912 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
912 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
913
913
914 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
914 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
915
915
916 def p1(self):
916 def p1(self):
917 return self.parents()[0]
917 return self.parents()[0]
918
918
919 def p2(self):
919 def p2(self):
920 p = self.parents()
920 p = self.parents()
921 if len(p) == 2:
921 if len(p) == 2:
922 return p[1]
922 return p[1]
923 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
923 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
924
924
    def annotate(self, follow=False, linenumber=False, diffopts=None):
        '''returns a list of tuples of ((ctx, number), line) for each line
        in the file, where ctx is the filectx of the node where
        that line was last changed; if linenumber parameter is true, number is
        the line number at the first appearance in the managed file, otherwise,
        number has a fixed value of False.
        '''

        def lines(text):
            # number of lines, counting a trailing partial line as one
            if text.endswith("\n"):
                return text.count("\n")
            return text.count("\n") + int(bool(text))

        if linenumber:
            def decorate(text, rev):
                return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
        else:
            def decorate(text, rev):
                return ([(rev, False)] * lines(text), text)

        def pair(parent, child):
            # propagate annotations from parent into child for unchanged
            # ('=') blocks; everything else stays attributed to the child
            blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
            for (a1, a2, b1, b2), t in blocks:
                # Changed blocks ('!') or blocks made only of blank lines ('~')
                # belong to the child.
                if t == '=':
                    child[0][b1:b2] = parent[0][a1:a2]
            return child

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not '_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = self.filectx(self.filenode(), changeid=introrev)
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if introrev is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([introrev], inclusive=True)
            base._ancestrycontext = ac

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        # 1st DFS pre-calculates pcache and needed
        # pcache: filectx -> its (filtered) parents
        # needed: reference count so annotations can be freed eagerly below
        visit = [base]
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit.pop()
            if f in pcache:
                continue
            pl = parents(f)
            pcache[f] = pl
            for p in pl:
                needed[p] = needed.get(p, 0) + 1
                if p not in pcache:
                    visit.append(p)

        # 2nd DFS does the actual annotate
        visit[:] = [base]
        hist = {}
        while visit:
            f = visit[-1]
            if f in hist:
                visit.pop()
                continue

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
            if ready:
                # all parents annotated: fold their annotations into f's
                visit.pop()
                curr = decorate(f.data(), f)
                for p in pl:
                    curr = pair(hist[p], curr)
                    if needed[p] == 1:
                        # last user of p's annotation; free it to bound memory
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                del pcache[f]

        return zip(hist[base][0], hist[base][1].splitlines(True))
1041
1041
1042 def ancestors(self, followfirst=False):
1042 def ancestors(self, followfirst=False):
1043 visit = {}
1043 visit = {}
1044 c = self
1044 c = self
1045 if followfirst:
1045 if followfirst:
1046 cut = 1
1046 cut = 1
1047 else:
1047 else:
1048 cut = None
1048 cut = None
1049
1049
1050 while True:
1050 while True:
1051 for parent in c.parents()[:cut]:
1051 for parent in c.parents()[:cut]:
1052 visit[(parent.linkrev(), parent.filenode())] = parent
1052 visit[(parent.linkrev(), parent.filenode())] = parent
1053 if not visit:
1053 if not visit:
1054 break
1054 break
1055 c = visit.pop(max(visit))
1055 c = visit.pop(max(visit))
1056 yield c
1056 yield c
1057
1057
1058 class filectx(basefilectx):
1058 class filectx(basefilectx):
1059 """A filecontext object makes access to data related to a particular
1059 """A filecontext object makes access to data related to a particular
1060 filerevision convenient."""
1060 filerevision convenient."""
1061 def __init__(self, repo, path, changeid=None, fileid=None,
1061 def __init__(self, repo, path, changeid=None, fileid=None,
1062 filelog=None, changectx=None):
1062 filelog=None, changectx=None):
1063 """changeid can be a changeset revision, node, or tag.
1063 """changeid can be a changeset revision, node, or tag.
1064 fileid can be a file revision or node."""
1064 fileid can be a file revision or node."""
1065 self._repo = repo
1065 self._repo = repo
1066 self._path = path
1066 self._path = path
1067
1067
1068 assert (changeid is not None
1068 assert (changeid is not None
1069 or fileid is not None
1069 or fileid is not None
1070 or changectx is not None), \
1070 or changectx is not None), \
1071 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1071 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1072 % (changeid, fileid, changectx))
1072 % (changeid, fileid, changectx))
1073
1073
1074 if filelog is not None:
1074 if filelog is not None:
1075 self._filelog = filelog
1075 self._filelog = filelog
1076
1076
1077 if changeid is not None:
1077 if changeid is not None:
1078 self._changeid = changeid
1078 self._changeid = changeid
1079 if changectx is not None:
1079 if changectx is not None:
1080 self._changectx = changectx
1080 self._changectx = changectx
1081 if fileid is not None:
1081 if fileid is not None:
1082 self._fileid = fileid
1082 self._fileid = fileid
1083
1083
1084 @propertycache
1084 @propertycache
1085 def _changectx(self):
1085 def _changectx(self):
1086 try:
1086 try:
1087 return changectx(self._repo, self._changeid)
1087 return changectx(self._repo, self._changeid)
1088 except error.FilteredRepoLookupError:
1088 except error.FilteredRepoLookupError:
1089 # Linkrev may point to any revision in the repository. When the
1089 # Linkrev may point to any revision in the repository. When the
1090 # repository is filtered this may lead to `filectx` trying to build
1090 # repository is filtered this may lead to `filectx` trying to build
1091 # `changectx` for a filtered revision. In such a case we fall back to
1091 # `changectx` for a filtered revision. In such a case we fall back to
1092 # creating `changectx` on the unfiltered version of the repository.
1092 # creating `changectx` on the unfiltered version of the repository.
1093 # This fallback should not be an issue because `changectx` from
1093 # This fallback should not be an issue because `changectx` from
1094 # `filectx` are not used in complex operations that care about
1094 # `filectx` are not used in complex operations that care about
1095 # filtering.
1095 # filtering.
1096 #
1096 #
1097 # This fallback is a cheap and dirty fix that prevents several
1097 # This fallback is a cheap and dirty fix that prevents several
1098 # crashes. It does not ensure the behavior is correct. However the
1098 # crashes. It does not ensure the behavior is correct. However the
1099 # behavior was not correct before filtering either and "incorrect
1099 # behavior was not correct before filtering either and "incorrect
1100 # behavior" is seen as better than "crash"
1100 # behavior" is seen as better than "crash"
1101 #
1101 #
1102 # Linkrevs have several serious troubles with filtering that are
1102 # Linkrevs have several serious troubles with filtering that are
1103 # complicated to solve. Proper handling of the issue here should be
1103 # complicated to solve. Proper handling of the issue here should be
1104 # considered when solving the linkrev issue is on the table.
1104 # considered when solving the linkrev issue is on the table.
1105 return changectx(self._repo.unfiltered(), self._changeid)
1105 return changectx(self._repo.unfiltered(), self._changeid)
1106
1106
1107 def filectx(self, fileid, changeid=None):
1107 def filectx(self, fileid, changeid=None):
1108 '''opens an arbitrary revision of the file without
1108 '''opens an arbitrary revision of the file without
1109 opening a new filelog'''
1109 opening a new filelog'''
1110 return filectx(self._repo, self._path, fileid=fileid,
1110 return filectx(self._repo, self._path, fileid=fileid,
1111 filelog=self._filelog, changeid=changeid)
1111 filelog=self._filelog, changeid=changeid)
1112
1112
1113 def rawdata(self):
1113 def rawdata(self):
1114 return self._filelog.revision(self._filenode, raw=True)
1114 return self._filelog.revision(self._filenode, raw=True)
1115
1115
1116 def data(self):
1116 def data(self):
1117 try:
1117 try:
1118 return self._filelog.read(self._filenode)
1118 return self._filelog.read(self._filenode)
1119 except error.CensoredNodeError:
1119 except error.CensoredNodeError:
1120 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1120 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1121 return ""
1121 return ""
1122 raise error.Abort(_("censored node: %s") % short(self._filenode),
1122 raise error.Abort(_("censored node: %s") % short(self._filenode),
1123 hint=_("set censor.policy to ignore errors"))
1123 hint=_("set censor.policy to ignore errors"))
1124
1124
1125 def size(self):
1125 def size(self):
1126 return self._filelog.size(self._filerev)
1126 return self._filelog.size(self._filerev)
1127
1127
1128 def renamed(self):
1128 def renamed(self):
1129 """check if file was actually renamed in this changeset revision
1129 """check if file was actually renamed in this changeset revision
1130
1130
1131 If rename logged in file revision, we report copy for changeset only
1131 If rename logged in file revision, we report copy for changeset only
1132 if file revisions linkrev points back to the changeset in question
1132 if file revisions linkrev points back to the changeset in question
1133 or both changeset parents contain different file revisions.
1133 or both changeset parents contain different file revisions.
1134 """
1134 """
1135
1135
1136 renamed = self._filelog.renamed(self._filenode)
1136 renamed = self._filelog.renamed(self._filenode)
1137 if not renamed:
1137 if not renamed:
1138 return renamed
1138 return renamed
1139
1139
1140 if self.rev() == self.linkrev():
1140 if self.rev() == self.linkrev():
1141 return renamed
1141 return renamed
1142
1142
1143 name = self.path()
1143 name = self.path()
1144 fnode = self._filenode
1144 fnode = self._filenode
1145 for p in self._changectx.parents():
1145 for p in self._changectx.parents():
1146 try:
1146 try:
1147 if fnode == p.filenode(name):
1147 if fnode == p.filenode(name):
1148 return None
1148 return None
1149 except error.LookupError:
1149 except error.LookupError:
1150 pass
1150 pass
1151 return renamed
1151 return renamed
1152
1152
1153 def children(self):
1153 def children(self):
1154 # hard for renames
1154 # hard for renames
1155 c = self._filelog.children(self._filenode)
1155 c = self._filelog.children(self._filenode)
1156 return [filectx(self._repo, self._path, fileid=x,
1156 return [filectx(self._repo, self._path, fileid=x,
1157 filelog=self._filelog) for x in c]
1157 filelog=self._filelog) for x in c]
1158
1158
1159 def blockancestors(fctx, fromline, toline):
1159 def _changesrange(fctx1, fctx2, linerange2, diffopts):
1160 """Yield ancestors of `fctx` with respect to the block of lines within
1161 `fromline`-`toline` range.
1162 """
1163 def changesrange(fctx1, fctx2, linerange2):
1164 """Return `(diffinrange, linerange1)` where `diffinrange` is True
1160 """Return `(diffinrange, linerange1)` where `diffinrange` is True
1165 if diff from fctx2 to fctx1 has changes in linerange2 and
1161 if diff from fctx2 to fctx1 has changes in linerange2 and
1166 `linerange1` is the new line range for fctx1.
1162 `linerange1` is the new line range for fctx1.
1167 """
1163 """
1168 diffopts = patch.diffopts(fctx._repo.ui)
1169 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
1164 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
1170 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
1165 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
1171 diffinrange = any(stype == '!' for _, stype in filteredblocks)
1166 diffinrange = any(stype == '!' for _, stype in filteredblocks)
1172 return diffinrange, linerange1
1167 return diffinrange, linerange1
1173
1168
1169 def blockancestors(fctx, fromline, toline):
1170 """Yield ancestors of `fctx` with respect to the block of lines within
1171 `fromline`-`toline` range.
1172 """
1173 diffopts = patch.diffopts(fctx._repo.ui)
1174 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
1174 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
1175 while visit:
1175 while visit:
1176 c, linerange2 = visit.pop(max(visit))
1176 c, linerange2 = visit.pop(max(visit))
1177 pl = c.parents()
1177 pl = c.parents()
1178 if not pl:
1178 if not pl:
1179 # The block originates from the initial revision.
1179 # The block originates from the initial revision.
1180 yield c
1180 yield c
1181 continue
1181 continue
1182 inrange = False
1182 inrange = False
1183 for p in pl:
1183 for p in pl:
1184 inrangep, linerange1 = changesrange(p, c, linerange2)
1184 inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts)
1185 inrange = inrange or inrangep
1185 inrange = inrange or inrangep
1186 if linerange1[0] == linerange1[1]:
1186 if linerange1[0] == linerange1[1]:
1187 # Parent's linerange is empty, meaning that the block got
1187 # Parent's linerange is empty, meaning that the block got
1188 # introduced in this revision; no need to go further in this
1188 # introduced in this revision; no need to go further in this
1189 # branch.
1189 # branch.
1190 continue
1190 continue
1191 visit[p.linkrev(), p.filenode()] = p, linerange1
1191 visit[p.linkrev(), p.filenode()] = p, linerange1
1192 if inrange:
1192 if inrange:
1193 yield c
1193 yield c
1194
1194
1195 class committablectx(basectx):
1195 class committablectx(basectx):
1196 """A committablectx object provides common functionality for a context that
1196 """A committablectx object provides common functionality for a context that
1197 wants the ability to commit, e.g. workingctx or memctx."""
1197 wants the ability to commit, e.g. workingctx or memctx."""
1198 def __init__(self, repo, text="", user=None, date=None, extra=None,
1198 def __init__(self, repo, text="", user=None, date=None, extra=None,
1199 changes=None):
1199 changes=None):
1200 self._repo = repo
1200 self._repo = repo
1201 self._rev = None
1201 self._rev = None
1202 self._node = None
1202 self._node = None
1203 self._text = text
1203 self._text = text
1204 if date:
1204 if date:
1205 self._date = util.parsedate(date)
1205 self._date = util.parsedate(date)
1206 if user:
1206 if user:
1207 self._user = user
1207 self._user = user
1208 if changes:
1208 if changes:
1209 self._status = changes
1209 self._status = changes
1210
1210
1211 self._extra = {}
1211 self._extra = {}
1212 if extra:
1212 if extra:
1213 self._extra = extra.copy()
1213 self._extra = extra.copy()
1214 if 'branch' not in self._extra:
1214 if 'branch' not in self._extra:
1215 try:
1215 try:
1216 branch = encoding.fromlocal(self._repo.dirstate.branch())
1216 branch = encoding.fromlocal(self._repo.dirstate.branch())
1217 except UnicodeDecodeError:
1217 except UnicodeDecodeError:
1218 raise error.Abort(_('branch name not in UTF-8!'))
1218 raise error.Abort(_('branch name not in UTF-8!'))
1219 self._extra['branch'] = branch
1219 self._extra['branch'] = branch
1220 if self._extra['branch'] == '':
1220 if self._extra['branch'] == '':
1221 self._extra['branch'] = 'default'
1221 self._extra['branch'] = 'default'
1222
1222
1223 def __str__(self):
1223 def __str__(self):
1224 return str(self._parents[0]) + "+"
1224 return str(self._parents[0]) + "+"
1225
1225
1226 def __nonzero__(self):
1226 def __nonzero__(self):
1227 return True
1227 return True
1228
1228
1229 def _buildflagfunc(self):
1229 def _buildflagfunc(self):
1230 # Create a fallback function for getting file flags when the
1230 # Create a fallback function for getting file flags when the
1231 # filesystem doesn't support them
1231 # filesystem doesn't support them
1232
1232
1233 copiesget = self._repo.dirstate.copies().get
1233 copiesget = self._repo.dirstate.copies().get
1234 parents = self.parents()
1234 parents = self.parents()
1235 if len(parents) < 2:
1235 if len(parents) < 2:
1236 # when we have one parent, it's easy: copy from parent
1236 # when we have one parent, it's easy: copy from parent
1237 man = parents[0].manifest()
1237 man = parents[0].manifest()
1238 def func(f):
1238 def func(f):
1239 f = copiesget(f, f)
1239 f = copiesget(f, f)
1240 return man.flags(f)
1240 return man.flags(f)
1241 else:
1241 else:
1242 # merges are tricky: we try to reconstruct the unstored
1242 # merges are tricky: we try to reconstruct the unstored
1243 # result from the merge (issue1802)
1243 # result from the merge (issue1802)
1244 p1, p2 = parents
1244 p1, p2 = parents
1245 pa = p1.ancestor(p2)
1245 pa = p1.ancestor(p2)
1246 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1246 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1247
1247
1248 def func(f):
1248 def func(f):
1249 f = copiesget(f, f) # may be wrong for merges with copies
1249 f = copiesget(f, f) # may be wrong for merges with copies
1250 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1250 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1251 if fl1 == fl2:
1251 if fl1 == fl2:
1252 return fl1
1252 return fl1
1253 if fl1 == fla:
1253 if fl1 == fla:
1254 return fl2
1254 return fl2
1255 if fl2 == fla:
1255 if fl2 == fla:
1256 return fl1
1256 return fl1
1257 return '' # punt for conflicts
1257 return '' # punt for conflicts
1258
1258
1259 return func
1259 return func
1260
1260
1261 @propertycache
1261 @propertycache
1262 def _flagfunc(self):
1262 def _flagfunc(self):
1263 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1263 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1264
1264
1265 @propertycache
1265 @propertycache
1266 def _manifest(self):
1266 def _manifest(self):
1267 """generate a manifest corresponding to the values in self._status
1267 """generate a manifest corresponding to the values in self._status
1268
1268
1269 This reuses the file nodeid from the parent, but we append an extra letter
1269 This reuses the file nodeid from the parent, but we append an extra letter
1270 when modified. Modified files get an extra 'm' while added files get
1270 when modified. Modified files get an extra 'm' while added files get
1271 an extra 'a'. This is used by manifests merge to see that files
1271 an extra 'a'. This is used by manifests merge to see that files
1272 are different and by update logic to avoid deleting newly added files.
1272 are different and by update logic to avoid deleting newly added files.
1273 """
1273 """
1274 parents = self.parents()
1274 parents = self.parents()
1275
1275
1276 man = parents[0].manifest().copy()
1276 man = parents[0].manifest().copy()
1277
1277
1278 ff = self._flagfunc
1278 ff = self._flagfunc
1279 for i, l in ((addednodeid, self._status.added),
1279 for i, l in ((addednodeid, self._status.added),
1280 (modifiednodeid, self._status.modified)):
1280 (modifiednodeid, self._status.modified)):
1281 for f in l:
1281 for f in l:
1282 man[f] = i
1282 man[f] = i
1283 try:
1283 try:
1284 man.setflag(f, ff(f))
1284 man.setflag(f, ff(f))
1285 except OSError:
1285 except OSError:
1286 pass
1286 pass
1287
1287
1288 for f in self._status.deleted + self._status.removed:
1288 for f in self._status.deleted + self._status.removed:
1289 if f in man:
1289 if f in man:
1290 del man[f]
1290 del man[f]
1291
1291
1292 return man
1292 return man
1293
1293
1294 @propertycache
1294 @propertycache
1295 def _status(self):
1295 def _status(self):
1296 return self._repo.status()
1296 return self._repo.status()
1297
1297
1298 @propertycache
1298 @propertycache
1299 def _user(self):
1299 def _user(self):
1300 return self._repo.ui.username()
1300 return self._repo.ui.username()
1301
1301
1302 @propertycache
1302 @propertycache
1303 def _date(self):
1303 def _date(self):
1304 return util.makedate()
1304 return util.makedate()
1305
1305
1306 def subrev(self, subpath):
1306 def subrev(self, subpath):
1307 return None
1307 return None
1308
1308
1309 def manifestnode(self):
1309 def manifestnode(self):
1310 return None
1310 return None
1311 def user(self):
1311 def user(self):
1312 return self._user or self._repo.ui.username()
1312 return self._user or self._repo.ui.username()
1313 def date(self):
1313 def date(self):
1314 return self._date
1314 return self._date
1315 def description(self):
1315 def description(self):
1316 return self._text
1316 return self._text
1317 def files(self):
1317 def files(self):
1318 return sorted(self._status.modified + self._status.added +
1318 return sorted(self._status.modified + self._status.added +
1319 self._status.removed)
1319 self._status.removed)
1320
1320
1321 def modified(self):
1321 def modified(self):
1322 return self._status.modified
1322 return self._status.modified
1323 def added(self):
1323 def added(self):
1324 return self._status.added
1324 return self._status.added
1325 def removed(self):
1325 def removed(self):
1326 return self._status.removed
1326 return self._status.removed
1327 def deleted(self):
1327 def deleted(self):
1328 return self._status.deleted
1328 return self._status.deleted
1329 def branch(self):
1329 def branch(self):
1330 return encoding.tolocal(self._extra['branch'])
1330 return encoding.tolocal(self._extra['branch'])
1331 def closesbranch(self):
1331 def closesbranch(self):
1332 return 'close' in self._extra
1332 return 'close' in self._extra
1333 def extra(self):
1333 def extra(self):
1334 return self._extra
1334 return self._extra
1335
1335
1336 def tags(self):
1336 def tags(self):
1337 return []
1337 return []
1338
1338
1339 def bookmarks(self):
1339 def bookmarks(self):
1340 b = []
1340 b = []
1341 for p in self.parents():
1341 for p in self.parents():
1342 b.extend(p.bookmarks())
1342 b.extend(p.bookmarks())
1343 return b
1343 return b
1344
1344
1345 def phase(self):
1345 def phase(self):
1346 phase = phases.draft # default phase to draft
1346 phase = phases.draft # default phase to draft
1347 for p in self.parents():
1347 for p in self.parents():
1348 phase = max(phase, p.phase())
1348 phase = max(phase, p.phase())
1349 return phase
1349 return phase
1350
1350
1351 def hidden(self):
1351 def hidden(self):
1352 return False
1352 return False
1353
1353
1354 def children(self):
1354 def children(self):
1355 return []
1355 return []
1356
1356
1357 def flags(self, path):
1357 def flags(self, path):
1358 if '_manifest' in self.__dict__:
1358 if '_manifest' in self.__dict__:
1359 try:
1359 try:
1360 return self._manifest.flags(path)
1360 return self._manifest.flags(path)
1361 except KeyError:
1361 except KeyError:
1362 return ''
1362 return ''
1363
1363
1364 try:
1364 try:
1365 return self._flagfunc(path)
1365 return self._flagfunc(path)
1366 except OSError:
1366 except OSError:
1367 return ''
1367 return ''
1368
1368
1369 def ancestor(self, c2):
1369 def ancestor(self, c2):
1370 """return the "best" ancestor context of self and c2"""
1370 """return the "best" ancestor context of self and c2"""
1371 return self._parents[0].ancestor(c2) # punt on two parents for now
1371 return self._parents[0].ancestor(c2) # punt on two parents for now
1372
1372
1373 def walk(self, match):
1373 def walk(self, match):
1374 '''Generates matching file names.'''
1374 '''Generates matching file names.'''
1375 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1375 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1376 True, False))
1376 True, False))
1377
1377
1378 def matches(self, match):
1378 def matches(self, match):
1379 return sorted(self._repo.dirstate.matches(match))
1379 return sorted(self._repo.dirstate.matches(match))
1380
1380
1381 def ancestors(self):
1381 def ancestors(self):
1382 for p in self._parents:
1382 for p in self._parents:
1383 yield p
1383 yield p
1384 for a in self._repo.changelog.ancestors(
1384 for a in self._repo.changelog.ancestors(
1385 [p.rev() for p in self._parents]):
1385 [p.rev() for p in self._parents]):
1386 yield changectx(self._repo, a)
1386 yield changectx(self._repo, a)
1387
1387
1388 def markcommitted(self, node):
1388 def markcommitted(self, node):
1389 """Perform post-commit cleanup necessary after committing this ctx
1389 """Perform post-commit cleanup necessary after committing this ctx
1390
1390
1391 Specifically, this updates backing stores this working context
1391 Specifically, this updates backing stores this working context
1392 wraps to reflect the fact that the changes reflected by this
1392 wraps to reflect the fact that the changes reflected by this
1393 workingctx have been committed. For example, it marks
1393 workingctx have been committed. For example, it marks
1394 modified and added files as normal in the dirstate.
1394 modified and added files as normal in the dirstate.
1395
1395
1396 """
1396 """
1397
1397
1398 self._repo.dirstate.beginparentchange()
1398 self._repo.dirstate.beginparentchange()
1399 for f in self.modified() + self.added():
1399 for f in self.modified() + self.added():
1400 self._repo.dirstate.normal(f)
1400 self._repo.dirstate.normal(f)
1401 for f in self.removed():
1401 for f in self.removed():
1402 self._repo.dirstate.drop(f)
1402 self._repo.dirstate.drop(f)
1403 self._repo.dirstate.setparents(node)
1403 self._repo.dirstate.setparents(node)
1404 self._repo.dirstate.endparentchange()
1404 self._repo.dirstate.endparentchange()
1405
1405
1406 # write changes out explicitly, because nesting wlock at
1406 # write changes out explicitly, because nesting wlock at
1407 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1407 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1408 # from immediately doing so for subsequent changing files
1408 # from immediately doing so for subsequent changing files
1409 self._repo.dirstate.write(self._repo.currenttransaction())
1409 self._repo.dirstate.write(self._repo.currenttransaction())
1410
1410
1411 class workingctx(committablectx):
1411 class workingctx(committablectx):
1412 """A workingctx object makes access to data related to
1412 """A workingctx object makes access to data related to
1413 the current working directory convenient.
1413 the current working directory convenient.
1414 date - any valid date string or (unixtime, offset), or None.
1414 date - any valid date string or (unixtime, offset), or None.
1415 user - username string, or None.
1415 user - username string, or None.
1416 extra - a dictionary of extra values, or None.
1416 extra - a dictionary of extra values, or None.
1417 changes - a list of file lists as returned by localrepo.status()
1417 changes - a list of file lists as returned by localrepo.status()
1418 or None to use the repository status.
1418 or None to use the repository status.
1419 """
1419 """
1420 def __init__(self, repo, text="", user=None, date=None, extra=None,
1420 def __init__(self, repo, text="", user=None, date=None, extra=None,
1421 changes=None):
1421 changes=None):
1422 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1422 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1423
1423
1424 def __iter__(self):
1424 def __iter__(self):
1425 d = self._repo.dirstate
1425 d = self._repo.dirstate
1426 for f in d:
1426 for f in d:
1427 if d[f] != 'r':
1427 if d[f] != 'r':
1428 yield f
1428 yield f
1429
1429
1430 def __contains__(self, key):
1430 def __contains__(self, key):
1431 return self._repo.dirstate[key] not in "?r"
1431 return self._repo.dirstate[key] not in "?r"
1432
1432
1433 def hex(self):
1433 def hex(self):
1434 return hex(wdirid)
1434 return hex(wdirid)
1435
1435
1436 @propertycache
1436 @propertycache
1437 def _parents(self):
1437 def _parents(self):
1438 p = self._repo.dirstate.parents()
1438 p = self._repo.dirstate.parents()
1439 if p[1] == nullid:
1439 if p[1] == nullid:
1440 p = p[:-1]
1440 p = p[:-1]
1441 return [changectx(self._repo, x) for x in p]
1441 return [changectx(self._repo, x) for x in p]
1442
1442
1443 def filectx(self, path, filelog=None):
1443 def filectx(self, path, filelog=None):
1444 """get a file context from the working directory"""
1444 """get a file context from the working directory"""
1445 return workingfilectx(self._repo, path, workingctx=self,
1445 return workingfilectx(self._repo, path, workingctx=self,
1446 filelog=filelog)
1446 filelog=filelog)
1447
1447
1448 def dirty(self, missing=False, merge=True, branch=True):
1448 def dirty(self, missing=False, merge=True, branch=True):
1449 "check whether a working directory is modified"
1449 "check whether a working directory is modified"
1450 # check subrepos first
1450 # check subrepos first
1451 for s in sorted(self.substate):
1451 for s in sorted(self.substate):
1452 if self.sub(s).dirty():
1452 if self.sub(s).dirty():
1453 return True
1453 return True
1454 # check current working dir
1454 # check current working dir
1455 return ((merge and self.p2()) or
1455 return ((merge and self.p2()) or
1456 (branch and self.branch() != self.p1().branch()) or
1456 (branch and self.branch() != self.p1().branch()) or
1457 self.modified() or self.added() or self.removed() or
1457 self.modified() or self.added() or self.removed() or
1458 (missing and self.deleted()))
1458 (missing and self.deleted()))
1459
1459
1460 def add(self, list, prefix=""):
1460 def add(self, list, prefix=""):
1461 join = lambda f: os.path.join(prefix, f)
1461 join = lambda f: os.path.join(prefix, f)
1462 with self._repo.wlock():
1462 with self._repo.wlock():
1463 ui, ds = self._repo.ui, self._repo.dirstate
1463 ui, ds = self._repo.ui, self._repo.dirstate
1464 rejected = []
1464 rejected = []
1465 lstat = self._repo.wvfs.lstat
1465 lstat = self._repo.wvfs.lstat
1466 for f in list:
1466 for f in list:
1467 scmutil.checkportable(ui, join(f))
1467 scmutil.checkportable(ui, join(f))
1468 try:
1468 try:
1469 st = lstat(f)
1469 st = lstat(f)
1470 except OSError:
1470 except OSError:
1471 ui.warn(_("%s does not exist!\n") % join(f))
1471 ui.warn(_("%s does not exist!\n") % join(f))
1472 rejected.append(f)
1472 rejected.append(f)
1473 continue
1473 continue
1474 if st.st_size > 10000000:
1474 if st.st_size > 10000000:
1475 ui.warn(_("%s: up to %d MB of RAM may be required "
1475 ui.warn(_("%s: up to %d MB of RAM may be required "
1476 "to manage this file\n"
1476 "to manage this file\n"
1477 "(use 'hg revert %s' to cancel the "
1477 "(use 'hg revert %s' to cancel the "
1478 "pending addition)\n")
1478 "pending addition)\n")
1479 % (f, 3 * st.st_size // 1000000, join(f)))
1479 % (f, 3 * st.st_size // 1000000, join(f)))
1480 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1480 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1481 ui.warn(_("%s not added: only files and symlinks "
1481 ui.warn(_("%s not added: only files and symlinks "
1482 "supported currently\n") % join(f))
1482 "supported currently\n") % join(f))
1483 rejected.append(f)
1483 rejected.append(f)
1484 elif ds[f] in 'amn':
1484 elif ds[f] in 'amn':
1485 ui.warn(_("%s already tracked!\n") % join(f))
1485 ui.warn(_("%s already tracked!\n") % join(f))
1486 elif ds[f] == 'r':
1486 elif ds[f] == 'r':
1487 ds.normallookup(f)
1487 ds.normallookup(f)
1488 else:
1488 else:
1489 ds.add(f)
1489 ds.add(f)
1490 return rejected
1490 return rejected
1491
1491
1492 def forget(self, files, prefix=""):
1492 def forget(self, files, prefix=""):
1493 join = lambda f: os.path.join(prefix, f)
1493 join = lambda f: os.path.join(prefix, f)
1494 with self._repo.wlock():
1494 with self._repo.wlock():
1495 rejected = []
1495 rejected = []
1496 for f in files:
1496 for f in files:
1497 if f not in self._repo.dirstate:
1497 if f not in self._repo.dirstate:
1498 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1498 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1499 rejected.append(f)
1499 rejected.append(f)
1500 elif self._repo.dirstate[f] != 'a':
1500 elif self._repo.dirstate[f] != 'a':
1501 self._repo.dirstate.remove(f)
1501 self._repo.dirstate.remove(f)
1502 else:
1502 else:
1503 self._repo.dirstate.drop(f)
1503 self._repo.dirstate.drop(f)
1504 return rejected
1504 return rejected
1505
1505
1506 def undelete(self, list):
1506 def undelete(self, list):
1507 pctxs = self.parents()
1507 pctxs = self.parents()
1508 with self._repo.wlock():
1508 with self._repo.wlock():
1509 for f in list:
1509 for f in list:
1510 if self._repo.dirstate[f] != 'r':
1510 if self._repo.dirstate[f] != 'r':
1511 self._repo.ui.warn(_("%s not removed!\n") % f)
1511 self._repo.ui.warn(_("%s not removed!\n") % f)
1512 else:
1512 else:
1513 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1513 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1514 t = fctx.data()
1514 t = fctx.data()
1515 self._repo.wwrite(f, t, fctx.flags())
1515 self._repo.wwrite(f, t, fctx.flags())
1516 self._repo.dirstate.normal(f)
1516 self._repo.dirstate.normal(f)
1517
1517
1518 def copy(self, source, dest):
1518 def copy(self, source, dest):
1519 try:
1519 try:
1520 st = self._repo.wvfs.lstat(dest)
1520 st = self._repo.wvfs.lstat(dest)
1521 except OSError as err:
1521 except OSError as err:
1522 if err.errno != errno.ENOENT:
1522 if err.errno != errno.ENOENT:
1523 raise
1523 raise
1524 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1524 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1525 return
1525 return
1526 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1526 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1527 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1527 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1528 "symbolic link\n") % dest)
1528 "symbolic link\n") % dest)
1529 else:
1529 else:
1530 with self._repo.wlock():
1530 with self._repo.wlock():
1531 if self._repo.dirstate[dest] in '?':
1531 if self._repo.dirstate[dest] in '?':
1532 self._repo.dirstate.add(dest)
1532 self._repo.dirstate.add(dest)
1533 elif self._repo.dirstate[dest] in 'r':
1533 elif self._repo.dirstate[dest] in 'r':
1534 self._repo.dirstate.normallookup(dest)
1534 self._repo.dirstate.normallookup(dest)
1535 self._repo.dirstate.copy(source, dest)
1535 self._repo.dirstate.copy(source, dest)
1536
1536
1537 def match(self, pats=[], include=None, exclude=None, default='glob',
1537 def match(self, pats=[], include=None, exclude=None, default='glob',
1538 listsubrepos=False, badfn=None):
1538 listsubrepos=False, badfn=None):
1539 r = self._repo
1539 r = self._repo
1540
1540
1541 # Only a case insensitive filesystem needs magic to translate user input
1541 # Only a case insensitive filesystem needs magic to translate user input
1542 # to actual case in the filesystem.
1542 # to actual case in the filesystem.
1543 if not util.fscasesensitive(r.root):
1543 if not util.fscasesensitive(r.root):
1544 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1544 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1545 exclude, default, r.auditor, self,
1545 exclude, default, r.auditor, self,
1546 listsubrepos=listsubrepos,
1546 listsubrepos=listsubrepos,
1547 badfn=badfn)
1547 badfn=badfn)
1548 return matchmod.match(r.root, r.getcwd(), pats,
1548 return matchmod.match(r.root, r.getcwd(), pats,
1549 include, exclude, default,
1549 include, exclude, default,
1550 auditor=r.auditor, ctx=self,
1550 auditor=r.auditor, ctx=self,
1551 listsubrepos=listsubrepos, badfn=badfn)
1551 listsubrepos=listsubrepos, badfn=badfn)
1552
1552
1553 def _filtersuspectsymlink(self, files):
1553 def _filtersuspectsymlink(self, files):
1554 if not files or self._repo.dirstate._checklink:
1554 if not files or self._repo.dirstate._checklink:
1555 return files
1555 return files
1556
1556
1557 # Symlink placeholders may get non-symlink-like contents
1557 # Symlink placeholders may get non-symlink-like contents
1558 # via user error or dereferencing by NFS or Samba servers,
1558 # via user error or dereferencing by NFS or Samba servers,
1559 # so we filter out any placeholders that don't look like a
1559 # so we filter out any placeholders that don't look like a
1560 # symlink
1560 # symlink
1561 sane = []
1561 sane = []
1562 for f in files:
1562 for f in files:
1563 if self.flags(f) == 'l':
1563 if self.flags(f) == 'l':
1564 d = self[f].data()
1564 d = self[f].data()
1565 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1565 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1566 self._repo.ui.debug('ignoring suspect symlink placeholder'
1566 self._repo.ui.debug('ignoring suspect symlink placeholder'
1567 ' "%s"\n' % f)
1567 ' "%s"\n' % f)
1568 continue
1568 continue
1569 sane.append(f)
1569 sane.append(f)
1570 return sane
1570 return sane
1571
1571
1572 def _checklookup(self, files):
1572 def _checklookup(self, files):
1573 # check for any possibly clean files
1573 # check for any possibly clean files
1574 if not files:
1574 if not files:
1575 return [], []
1575 return [], []
1576
1576
1577 modified = []
1577 modified = []
1578 fixup = []
1578 fixup = []
1579 pctx = self._parents[0]
1579 pctx = self._parents[0]
1580 # do a full compare of any files that might have changed
1580 # do a full compare of any files that might have changed
1581 for f in sorted(files):
1581 for f in sorted(files):
1582 if (f not in pctx or self.flags(f) != pctx.flags(f)
1582 if (f not in pctx or self.flags(f) != pctx.flags(f)
1583 or pctx[f].cmp(self[f])):
1583 or pctx[f].cmp(self[f])):
1584 modified.append(f)
1584 modified.append(f)
1585 else:
1585 else:
1586 fixup.append(f)
1586 fixup.append(f)
1587
1587
1588 # update dirstate for files that are actually clean
1588 # update dirstate for files that are actually clean
1589 if fixup:
1589 if fixup:
1590 try:
1590 try:
1591 # updating the dirstate is optional
1591 # updating the dirstate is optional
1592 # so we don't wait on the lock
1592 # so we don't wait on the lock
1593 # wlock can invalidate the dirstate, so cache normal _after_
1593 # wlock can invalidate the dirstate, so cache normal _after_
1594 # taking the lock
1594 # taking the lock
1595 with self._repo.wlock(False):
1595 with self._repo.wlock(False):
1596 normal = self._repo.dirstate.normal
1596 normal = self._repo.dirstate.normal
1597 for f in fixup:
1597 for f in fixup:
1598 normal(f)
1598 normal(f)
1599 # write changes out explicitly, because nesting
1599 # write changes out explicitly, because nesting
1600 # wlock at runtime may prevent 'wlock.release()'
1600 # wlock at runtime may prevent 'wlock.release()'
1601 # after this block from doing so for subsequent
1601 # after this block from doing so for subsequent
1602 # changing files
1602 # changing files
1603 self._repo.dirstate.write(self._repo.currenttransaction())
1603 self._repo.dirstate.write(self._repo.currenttransaction())
1604 except error.LockError:
1604 except error.LockError:
1605 pass
1605 pass
1606 return modified, fixup
1606 return modified, fixup
1607
1607
1608 def _manifestmatches(self, match, s):
1608 def _manifestmatches(self, match, s):
1609 """Slow path for workingctx
1609 """Slow path for workingctx
1610
1610
1611 The fast path is when we compare the working directory to its parent
1611 The fast path is when we compare the working directory to its parent
1612 which means this function is comparing with a non-parent; therefore we
1612 which means this function is comparing with a non-parent; therefore we
1613 need to build a manifest and return what matches.
1613 need to build a manifest and return what matches.
1614 """
1614 """
1615 mf = self._repo['.']._manifestmatches(match, s)
1615 mf = self._repo['.']._manifestmatches(match, s)
1616 for f in s.modified + s.added:
1616 for f in s.modified + s.added:
1617 mf[f] = newnodeid
1617 mf[f] = newnodeid
1618 mf.setflag(f, self.flags(f))
1618 mf.setflag(f, self.flags(f))
1619 for f in s.removed:
1619 for f in s.removed:
1620 if f in mf:
1620 if f in mf:
1621 del mf[f]
1621 del mf[f]
1622 return mf
1622 return mf
1623
1623
1624 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1624 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1625 unknown=False):
1625 unknown=False):
1626 '''Gets the status from the dirstate -- internal use only.'''
1626 '''Gets the status from the dirstate -- internal use only.'''
1627 listignored, listclean, listunknown = ignored, clean, unknown
1627 listignored, listclean, listunknown = ignored, clean, unknown
1628 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1628 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1629 subrepos = []
1629 subrepos = []
1630 if '.hgsub' in self:
1630 if '.hgsub' in self:
1631 subrepos = sorted(self.substate)
1631 subrepos = sorted(self.substate)
1632 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1632 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1633 listclean, listunknown)
1633 listclean, listunknown)
1634
1634
1635 # check for any possibly clean files
1635 # check for any possibly clean files
1636 if cmp:
1636 if cmp:
1637 modified2, fixup = self._checklookup(cmp)
1637 modified2, fixup = self._checklookup(cmp)
1638 s.modified.extend(modified2)
1638 s.modified.extend(modified2)
1639
1639
1640 # update dirstate for files that are actually clean
1640 # update dirstate for files that are actually clean
1641 if fixup and listclean:
1641 if fixup and listclean:
1642 s.clean.extend(fixup)
1642 s.clean.extend(fixup)
1643
1643
1644 if match.always():
1644 if match.always():
1645 # cache for performance
1645 # cache for performance
1646 if s.unknown or s.ignored or s.clean:
1646 if s.unknown or s.ignored or s.clean:
1647 # "_status" is cached with list*=False in the normal route
1647 # "_status" is cached with list*=False in the normal route
1648 self._status = scmutil.status(s.modified, s.added, s.removed,
1648 self._status = scmutil.status(s.modified, s.added, s.removed,
1649 s.deleted, [], [], [])
1649 s.deleted, [], [], [])
1650 else:
1650 else:
1651 self._status = s
1651 self._status = s
1652
1652
1653 return s
1653 return s
1654
1654
1655 def _buildstatus(self, other, s, match, listignored, listclean,
1655 def _buildstatus(self, other, s, match, listignored, listclean,
1656 listunknown):
1656 listunknown):
1657 """build a status with respect to another context
1657 """build a status with respect to another context
1658
1658
1659 This includes logic for maintaining the fast path of status when
1659 This includes logic for maintaining the fast path of status when
1660 comparing the working directory against its parent, which is to skip
1660 comparing the working directory against its parent, which is to skip
1661 building a new manifest if self (working directory) is not comparing
1661 building a new manifest if self (working directory) is not comparing
1662 against its parent (repo['.']).
1662 against its parent (repo['.']).
1663 """
1663 """
1664 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1664 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1665 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1665 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1666 # might have accidentally ended up with the entire contents of the file
1666 # might have accidentally ended up with the entire contents of the file
1667 # they are supposed to be linking to.
1667 # they are supposed to be linking to.
1668 s.modified[:] = self._filtersuspectsymlink(s.modified)
1668 s.modified[:] = self._filtersuspectsymlink(s.modified)
1669 if other != self._repo['.']:
1669 if other != self._repo['.']:
1670 s = super(workingctx, self)._buildstatus(other, s, match,
1670 s = super(workingctx, self)._buildstatus(other, s, match,
1671 listignored, listclean,
1671 listignored, listclean,
1672 listunknown)
1672 listunknown)
1673 return s
1673 return s
1674
1674
1675 def _matchstatus(self, other, match):
1675 def _matchstatus(self, other, match):
1676 """override the match method with a filter for directory patterns
1676 """override the match method with a filter for directory patterns
1677
1677
1678 We use inheritance to customize the match.bad method only in cases of
1678 We use inheritance to customize the match.bad method only in cases of
1679 workingctx since it belongs only to the working directory when
1679 workingctx since it belongs only to the working directory when
1680 comparing against the parent changeset.
1680 comparing against the parent changeset.
1681
1681
1682 If we aren't comparing against the working directory's parent, then we
1682 If we aren't comparing against the working directory's parent, then we
1683 just use the default match object sent to us.
1683 just use the default match object sent to us.
1684 """
1684 """
1685 superself = super(workingctx, self)
1685 superself = super(workingctx, self)
1686 match = superself._matchstatus(other, match)
1686 match = superself._matchstatus(other, match)
1687 if other != self._repo['.']:
1687 if other != self._repo['.']:
1688 def bad(f, msg):
1688 def bad(f, msg):
1689 # 'f' may be a directory pattern from 'match.files()',
1689 # 'f' may be a directory pattern from 'match.files()',
1690 # so 'f not in ctx1' is not enough
1690 # so 'f not in ctx1' is not enough
1691 if f not in other and not other.hasdir(f):
1691 if f not in other and not other.hasdir(f):
1692 self._repo.ui.warn('%s: %s\n' %
1692 self._repo.ui.warn('%s: %s\n' %
1693 (self._repo.dirstate.pathto(f), msg))
1693 (self._repo.dirstate.pathto(f), msg))
1694 match.bad = bad
1694 match.bad = bad
1695 return match
1695 return match
1696
1696
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # an uncommitted file context always "exists"
        return True

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # follow the copy: parent is the rename source
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        # second (merge) parent, if any
        pl.extend((path, filenode(pc, path), fl) for pc in pcl[1:])

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        # uncommitted revisions have no children
        return []
1741
1741
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        # file content as read from the working directory
        return self._repo.wread(self._path)

    def renamed(self):
        src = self._repo.dirstate.copied(self._path)
        if not src:
            return None
        # (source path, source filenode in the first parent's manifest)
        return src, self._changectx._parents[0]._manifest.get(src, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # file is gone from the working directory: fall back to the
            # changeset date
            return (t, tz)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)
1787
1787
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # deliberately skip workingctx.__init__ and initialize the
        # grandparent (committablectx) directly with explicit 'changes'
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []

        def matched(files):
            return [f for f in files if match(f)]

        return scmutil.status(matched(self._status.modified),
                              matched(self._status.added),
                              matched(self._status.removed),
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
1825
1825
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # EAFP: hit the cache first, compute and store on miss
        try:
            return cache[path]
        except KeyError:
            result = cache[path] = func(repo, memctx, path)
            return result

    return getfilectx
1841
1841
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root.  It is fired by the
    commit function for every file in 'files', but calls order is
    undefined.  If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object.  If the file was removed, filectxfn raises an IOError.
    Moved files are represented by marking the source file removed and
    the new file added with copy information (see memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing
    # files.  Extensions that need to retain compatibility across Mercurial
    # 3.1 can use this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # normalize missing parents to the null node
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        self._files = sorted(set(files))
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one
                # parent (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(),
                                  isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx
        else:
            # memoizing increases performance for e.g. vcs convert scenarios.
            self._filectxfn = makecachingfilectxfn(filectxfn)

        self._extra = extra.copy() if extra else {}

        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # added files have no filelog parents
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                # filectxfn returned a falsy value: file was removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
1978
1978
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        # encode the flags the same way a manifest does: 'l' and/or 'x'
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        self._copied = (copied, nullid) if copied else None

    def data(self):
        return self._data

    def size(self):
        return len(self.data())

    def flags(self):
        return self._flags

    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        # committablectx.__new__ only needs the repo; the remaining
        # constructor arguments are consumed by __init__.
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents, text, user=None, date=None,
                 extra=None, editor=False):
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        # normalize missing parents to the null revision
        parents = [(p or nullid) for p in parents]
        p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestctx().node() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestctx().node() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if extra:
            self._extra = extra.copy()
        else:
            self._extra = {}

        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        """Return the node id of the manifest reused from the original ctx."""
        return self._manifestnode

    @propertycache
    def _manifestctx(self):
        # lazily resolve the reused manifest node to a manifest context
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        """Delegate file context lookup to the original changeset."""
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        # the manifest is shared with the original changeset by construction
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # not tracked in either parent: it was added here
                added.append(f)
            elif self[f]:
                # still present in this ctx: it was modified
                modified.append(f)
            else:
                # tracked in a parent but absent here: it was removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
General Comments 0
You need to be logged in to leave comments. Login now