context: add a `blockancestors(fctx, fromline, toline)` function...
Denis Laxalde
r30718:ce662ee4 (branch: default)
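Judging from the summary line, the new blockancestors(fctx, fromline, toline) helper yields the ancestors of a file context in which a given block of lines was touched. Below is a minimal usage sketch, not taken from this changeset: the repository path, file name, line range, and the assumption that the generator yields filectx-like objects are all illustrative. The diff that follows only covers the unchanged beginning of context.py, so the new function itself is not visible in this excerpt.

# Hedged sketch: list the revisions in which lines 10-20 of mercurial/context.py
# were touched, walking ancestors from the working directory's parent.
from mercurial import context, hg, ui as uimod

repo = hg.repository(uimod.ui(), '.')       # assumption: a local repo in the cwd
fctx = repo['.']['mercurial/context.py']    # file context at the parent revision
for afctx in context.blockancestors(fctx, 10, 20):
    # assumption: each yielded value behaves like a filectx
    print('%d:%s' % (afctx.rev(), afctx.path()))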
@@ -1,2075 +1,2111 @@
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import re
import stat

from .i18n import _
from .node import (
    addednodeid,
    bin,
    hex,
    modifiednodeid,
    newnodeid,
    nullid,
    nullrev,
    short,
    wdirid,
)
from . import (
    encoding,
    error,
    fileset,
    match as matchmod,
    mdiff,
    obsolete as obsmod,
    patch,
    phases,
    repoview,
    revlog,
    scmutil,
    subrepo,
    util,
)

propertycache = util.propertycache

nonascii = re.compile(r'[^\x21-\x7f]').search

class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __str__(self):
        return short(self.node())

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _manifestmatches(self, match, s):
        """generate a new manifest filtered by the match argument

        This method is for internal use only and mainly exists to provide an
        object oriented way for other contexts to customize the manifest
        generation.
        """
        return self.manifest().matches(match)

    def _matchstatus(self, other, match):
        """return match.always if match is none

        This internal method provides a way for child objects to override the
        match operator.
        """
        return match or matchmod.always(self._repo.root, self._repo.getcwd())

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        if self.rev() is not None and self.rev() < other.rev():
            self.manifest()
        mf1 = other._manifestmatches(match, s)
        mf2 = self._manifestmatches(match, s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 != newnodeid:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1]
            ignored = [fn for fn in ignored if fn not in mf1]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)
    @propertycache
    def substate(self):
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        """True if the changeset is not obsolete but its ancestors are"""
        return self.rev() in obsmod.getrevs(self._repo, 'unstable')

    def bumped(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'bumped')

    def divergent(self):
        """Is a successor of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'divergent')

    def troubled(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.unstable() or self.bumped() or self.divergent()

    def troubles(self):
        """return the list of troubles affecting this changeset.

        Troubles are returned as strings. Possible values are:
        - unstable,
        - bumped,
        - divergent.
        """
        troubles = []
        if self.unstable():
            troubles.append('unstable')
        if self.bumped():
            troubles.append('bumped')
        if self.divergent():
            troubles.append('divergent')
        return troubles

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=[], include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def dirty(self, missing=False, merge=True, branch=True):
        return False

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r


def makememctx(repo, parents, text, user, date, branch, files, store,
               editor=None, extra=None):
    def getfilectx(repo, memctx, path):
        data, mode, copied = store.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink, isexec=isexec,
                          copied=copied, memctx=memctx)
    if extra is None:
        extra = {}
    if branch:
        extra['branch'] = encoding.fromlocal(branch)
    ctx = memctx(repo, parents, text, files, getfilectx, user,
                 date, extra, editor)
    return ctx
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                r = int(changeid)
                if str(r) != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            if repo.filtername.startswith('visible'):
                msg = _("hidden revision '%s'") % changeid
                hint = _('use --hidden to access hidden revisions')
                raise error.FilteredRepoLookupError(msg, hint=hint)
            msg = _("filtered revision '%s' (not in '%s' subset)")
            msg %= (changeid, repo.filtername)
            raise error.FilteredRepoLookupError(msg)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @propertycache
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        return self._changeset.extra
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
665 class basefilectx(object):
665 class basefilectx(object):
666 """A filecontext object represents the common logic for its children:
666 """A filecontext object represents the common logic for its children:
667 filectx: read-only access to a filerevision that is already present
667 filectx: read-only access to a filerevision that is already present
668 in the repo,
668 in the repo,
669 workingfilectx: a filecontext that represents files from the working
669 workingfilectx: a filecontext that represents files from the working
670 directory,
670 directory,
671 memfilectx: a filecontext that represents files in-memory."""
671 memfilectx: a filecontext that represents files in-memory."""
672 def __new__(cls, repo, path, *args, **kwargs):
672 def __new__(cls, repo, path, *args, **kwargs):
673 return super(basefilectx, cls).__new__(cls)
673 return super(basefilectx, cls).__new__(cls)
674
674
675 @propertycache
675 @propertycache
676 def _filelog(self):
676 def _filelog(self):
677 return self._repo.file(self._path)
677 return self._repo.file(self._path)
678
678
679 @propertycache
679 @propertycache
680 def _changeid(self):
680 def _changeid(self):
681 if '_changeid' in self.__dict__:
681 if '_changeid' in self.__dict__:
682 return self._changeid
682 return self._changeid
683 elif '_changectx' in self.__dict__:
683 elif '_changectx' in self.__dict__:
684 return self._changectx.rev()
684 return self._changectx.rev()
685 elif '_descendantrev' in self.__dict__:
685 elif '_descendantrev' in self.__dict__:
686 # this file context was created from a revision with a known
686 # this file context was created from a revision with a known
687 # descendant, we can (lazily) correct for linkrev aliases
687 # descendant, we can (lazily) correct for linkrev aliases
688 return self._adjustlinkrev(self._descendantrev)
688 return self._adjustlinkrev(self._descendantrev)
689 else:
689 else:
690 return self._filelog.linkrev(self._filerev)
690 return self._filelog.linkrev(self._filerev)
691
691
692 @propertycache
692 @propertycache
693 def _filenode(self):
693 def _filenode(self):
694 if '_fileid' in self.__dict__:
694 if '_fileid' in self.__dict__:
695 return self._filelog.lookup(self._fileid)
695 return self._filelog.lookup(self._fileid)
696 else:
696 else:
697 return self._changectx.filenode(self._path)
697 return self._changectx.filenode(self._path)
698
698
699 @propertycache
699 @propertycache
700 def _filerev(self):
700 def _filerev(self):
701 return self._filelog.rev(self._filenode)
701 return self._filelog.rev(self._filenode)
702
702
703 @propertycache
703 @propertycache
704 def _repopath(self):
704 def _repopath(self):
705 return self._path
705 return self._path
706
706
707 def __nonzero__(self):
707 def __nonzero__(self):
708 try:
708 try:
709 self._filenode
709 self._filenode
710 return True
710 return True
711 except error.LookupError:
711 except error.LookupError:
712 # file is missing
712 # file is missing
713 return False
713 return False
714
714
715 def __str__(self):
715 def __str__(self):
716 try:
716 try:
717 return "%s@%s" % (self.path(), self._changectx)
717 return "%s@%s" % (self.path(), self._changectx)
718 except error.LookupError:
718 except error.LookupError:
719 return "%s@???" % self.path()
719 return "%s@???" % self.path()
720
720
721 def __repr__(self):
721 def __repr__(self):
722 return "<%s %s>" % (type(self).__name__, str(self))
722 return "<%s %s>" % (type(self).__name__, str(self))
723
723
724 def __hash__(self):
724 def __hash__(self):
725 try:
725 try:
726 return hash((self._path, self._filenode))
726 return hash((self._path, self._filenode))
727 except AttributeError:
727 except AttributeError:
728 return id(self)
728 return id(self)
729
729
730 def __eq__(self, other):
730 def __eq__(self, other):
731 try:
731 try:
732 return (type(self) == type(other) and self._path == other._path
732 return (type(self) == type(other) and self._path == other._path
733 and self._filenode == other._filenode)
733 and self._filenode == other._filenode)
734 except AttributeError:
734 except AttributeError:
735 return False
735 return False
736
736
737 def __ne__(self, other):
737 def __ne__(self, other):
738 return not (self == other)
738 return not (self == other)
739
739
740 def filerev(self):
740 def filerev(self):
741 return self._filerev
741 return self._filerev
742 def filenode(self):
742 def filenode(self):
743 return self._filenode
743 return self._filenode
744 def flags(self):
744 def flags(self):
745 return self._changectx.flags(self._path)
745 return self._changectx.flags(self._path)
746 def filelog(self):
746 def filelog(self):
747 return self._filelog
747 return self._filelog
748 def rev(self):
748 def rev(self):
749 return self._changeid
749 return self._changeid
750 def linkrev(self):
750 def linkrev(self):
751 return self._filelog.linkrev(self._filerev)
751 return self._filelog.linkrev(self._filerev)
752 def node(self):
752 def node(self):
753 return self._changectx.node()
753 return self._changectx.node()
754 def hex(self):
754 def hex(self):
755 return self._changectx.hex()
755 return self._changectx.hex()
756 def user(self):
756 def user(self):
757 return self._changectx.user()
757 return self._changectx.user()
758 def date(self):
758 def date(self):
759 return self._changectx.date()
759 return self._changectx.date()
760 def files(self):
760 def files(self):
761 return self._changectx.files()
761 return self._changectx.files()
762 def description(self):
762 def description(self):
763 return self._changectx.description()
763 return self._changectx.description()
764 def branch(self):
764 def branch(self):
765 return self._changectx.branch()
765 return self._changectx.branch()
766 def extra(self):
766 def extra(self):
767 return self._changectx.extra()
767 return self._changectx.extra()
768 def phase(self):
768 def phase(self):
769 return self._changectx.phase()
769 return self._changectx.phase()
770 def phasestr(self):
770 def phasestr(self):
771 return self._changectx.phasestr()
771 return self._changectx.phasestr()
772 def manifest(self):
772 def manifest(self):
773 return self._changectx.manifest()
773 return self._changectx.manifest()
774 def changectx(self):
774 def changectx(self):
775 return self._changectx
775 return self._changectx
776 def repo(self):
776 def repo(self):
777 return self._repo
777 return self._repo
778
778
779 def path(self):
779 def path(self):
780 return self._path
780 return self._path
781
781
782 def isbinary(self):
782 def isbinary(self):
783 try:
783 try:
784 return util.binary(self.data())
784 return util.binary(self.data())
785 except IOError:
785 except IOError:
786 return False
786 return False
787 def isexec(self):
787 def isexec(self):
788 return 'x' in self.flags()
788 return 'x' in self.flags()
789 def islink(self):
789 def islink(self):
790 return 'l' in self.flags()
790 return 'l' in self.flags()
791
791
792 def isabsent(self):
792 def isabsent(self):
793 """whether this filectx represents a file not in self._changectx
793 """whether this filectx represents a file not in self._changectx
794
794
795 This is mainly for merge code to detect change/delete conflicts. This is
795 This is mainly for merge code to detect change/delete conflicts. This is
796 expected to be True for all subclasses of basectx."""
796 expected to be True for all subclasses of basectx."""
797 return False
797 return False
798
798
799 _customcmp = False
799 _customcmp = False
800 def cmp(self, fctx):
800 def cmp(self, fctx):
801 """compare with other file context
801 """compare with other file context
802
802
803 returns True if different than fctx.
803 returns True if different than fctx.
804 """
804 """
805 if fctx._customcmp:
805 if fctx._customcmp:
806 return fctx.cmp(self)
806 return fctx.cmp(self)
807
807
808 if (fctx._filenode is None
808 if (fctx._filenode is None
809 and (self._repo._encodefilterpats
809 and (self._repo._encodefilterpats
810 # if file data starts with '\1\n', empty metadata block is
810 # if file data starts with '\1\n', empty metadata block is
811 # prepended, which adds 4 bytes to filelog.size().
811 # prepended, which adds 4 bytes to filelog.size().
812 or self.size() - 4 == fctx.size())
812 or self.size() - 4 == fctx.size())
813 or self.size() == fctx.size()):
813 or self.size() == fctx.size()):
814 return self._filelog.cmp(self._filenode, fctx.data())
814 return self._filelog.cmp(self._filenode, fctx.data())
815
815
816 return True
816 return True
817
817
818 def _adjustlinkrev(self, srcrev, inclusive=False):
818 def _adjustlinkrev(self, srcrev, inclusive=False):
819 """return the first ancestor of <srcrev> introducing <fnode>
819 """return the first ancestor of <srcrev> introducing <fnode>
820
820
821 If the linkrev of the file revision does not point to an ancestor of
821 If the linkrev of the file revision does not point to an ancestor of
822 srcrev, we'll walk down the ancestors until we find one introducing
822 srcrev, we'll walk down the ancestors until we find one introducing
823 this file revision.
823 this file revision.
824
824
825 :srcrev: the changeset revision we search ancestors from
825 :srcrev: the changeset revision we search ancestors from
826 :inclusive: if true, the src revision will also be checked
826 :inclusive: if true, the src revision will also be checked
827 """
827 """
828 repo = self._repo
828 repo = self._repo
829 cl = repo.unfiltered().changelog
829 cl = repo.unfiltered().changelog
830 mfl = repo.manifestlog
830 mfl = repo.manifestlog
831 # fetch the linkrev
831 # fetch the linkrev
832 lkr = self.linkrev()
832 lkr = self.linkrev()
833 # hack to reuse ancestor computation when searching for renames
833 # hack to reuse ancestor computation when searching for renames
834 memberanc = getattr(self, '_ancestrycontext', None)
834 memberanc = getattr(self, '_ancestrycontext', None)
835 iteranc = None
835 iteranc = None
836 if srcrev is None:
836 if srcrev is None:
837 # wctx case, used by workingfilectx during mergecopy
837 # wctx case, used by workingfilectx during mergecopy
838 revs = [p.rev() for p in self._repo[None].parents()]
838 revs = [p.rev() for p in self._repo[None].parents()]
839 inclusive = True # we skipped the real (revless) source
839 inclusive = True # we skipped the real (revless) source
840 else:
840 else:
841 revs = [srcrev]
841 revs = [srcrev]
842 if memberanc is None:
842 if memberanc is None:
843 memberanc = iteranc = cl.ancestors(revs, lkr,
843 memberanc = iteranc = cl.ancestors(revs, lkr,
844 inclusive=inclusive)
844 inclusive=inclusive)
845 # check if this linkrev is an ancestor of srcrev
845 # check if this linkrev is an ancestor of srcrev
846 if lkr not in memberanc:
846 if lkr not in memberanc:
847 if iteranc is None:
847 if iteranc is None:
848 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
848 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
849 fnode = self._filenode
849 fnode = self._filenode
850 path = self._path
850 path = self._path
851 for a in iteranc:
851 for a in iteranc:
852 ac = cl.read(a) # get changeset data (we avoid object creation)
852 ac = cl.read(a) # get changeset data (we avoid object creation)
853 if path in ac[3]: # checking the 'files' field.
853 if path in ac[3]: # checking the 'files' field.
854 # The file has been touched, check if the content is
854 # The file has been touched, check if the content is
855 # similar to the one we search for.
855 # similar to the one we search for.
856 if fnode == mfl[ac[0]].readfast().get(path):
856 if fnode == mfl[ac[0]].readfast().get(path):
857 return a
857 return a
858 # In theory, we should never get out of that loop without a result.
858 # In theory, we should never get out of that loop without a result.
859 # But if manifest uses a buggy file revision (not children of the
859 # But if manifest uses a buggy file revision (not children of the
860 # one it replaces) we could. Such a buggy situation will likely
860 # one it replaces) we could. Such a buggy situation will likely
861 # result is crash somewhere else at to some point.
861 # result is crash somewhere else at to some point.
862 return lkr
862 return lkr
863
863
864 def introrev(self):
864 def introrev(self):
865 """return the rev of the changeset which introduced this file revision
865 """return the rev of the changeset which introduced this file revision
866
866
867 This method is different from linkrev because it take into account the
867 This method is different from linkrev because it take into account the
868 changeset the filectx was created from. It ensures the returned
868 changeset the filectx was created from. It ensures the returned
869 revision is one of its ancestors. This prevents bugs from
869 revision is one of its ancestors. This prevents bugs from
870 'linkrev-shadowing' when a file revision is used by multiple
870 'linkrev-shadowing' when a file revision is used by multiple
871 changesets.
871 changesets.
872 """
872 """
873 lkr = self.linkrev()
873 lkr = self.linkrev()
874 attrs = vars(self)
874 attrs = vars(self)
875 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
875 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
876 if noctx or self.rev() == lkr:
876 if noctx or self.rev() == lkr:
877 return self.linkrev()
877 return self.linkrev()
878 return self._adjustlinkrev(self.rev(), inclusive=True)
878 return self._adjustlinkrev(self.rev(), inclusive=True)
879
879
880 def _parentfilectx(self, path, fileid, filelog):
880 def _parentfilectx(self, path, fileid, filelog):
881 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
881 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
882 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
882 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
883 if '_changeid' in vars(self) or '_changectx' in vars(self):
883 if '_changeid' in vars(self) or '_changectx' in vars(self):
884 # If self is associated with a changeset (probably explicitly
884 # If self is associated with a changeset (probably explicitly
885 # fed), ensure the created filectx is associated with a
885 # fed), ensure the created filectx is associated with a
886 # changeset that is an ancestor of self.changectx.
886 # changeset that is an ancestor of self.changectx.
887 # This lets us later use _adjustlinkrev to get a correct link.
887 # This lets us later use _adjustlinkrev to get a correct link.
888 fctx._descendantrev = self.rev()
888 fctx._descendantrev = self.rev()
889 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
889 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
890 elif '_descendantrev' in vars(self):
890 elif '_descendantrev' in vars(self):
891 # Otherwise propagate _descendantrev if we have one associated.
891 # Otherwise propagate _descendantrev if we have one associated.
892 fctx._descendantrev = self._descendantrev
892 fctx._descendantrev = self._descendantrev
893 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
893 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
894 return fctx
894 return fctx
895
895
896 def parents(self):
896 def parents(self):
897 _path = self._path
897 _path = self._path
898 fl = self._filelog
898 fl = self._filelog
899 parents = self._filelog.parents(self._filenode)
899 parents = self._filelog.parents(self._filenode)
900 pl = [(_path, node, fl) for node in parents if node != nullid]
900 pl = [(_path, node, fl) for node in parents if node != nullid]
901
901
902 r = fl.renamed(self._filenode)
902 r = fl.renamed(self._filenode)
903 if r:
903 if r:
904 # - In the simple rename case, both parent are nullid, pl is empty.
904 # - In the simple rename case, both parent are nullid, pl is empty.
905 # - In case of merge, only one of the parent is null id and should
905 # - In case of merge, only one of the parent is null id and should
906 # be replaced with the rename information. This parent is -always-
906 # be replaced with the rename information. This parent is -always-
907 # the first one.
907 # the first one.
908 #
908 #
909 # As null id have always been filtered out in the previous list
909 # As null id have always been filtered out in the previous list
910 # comprehension, inserting to 0 will always result in "replacing
910 # comprehension, inserting to 0 will always result in "replacing
911 # first nullid parent with rename information.
911 # first nullid parent with rename information.
912 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
912 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
913
913
914 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
914 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
915
915
916 def p1(self):
916 def p1(self):
917 return self.parents()[0]
917 return self.parents()[0]
918
918
919 def p2(self):
919 def p2(self):
920 p = self.parents()
920 p = self.parents()
921 if len(p) == 2:
921 if len(p) == 2:
922 return p[1]
922 return p[1]
923 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
923 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
924
924
925 def annotate(self, follow=False, linenumber=False, diffopts=None):
925 def annotate(self, follow=False, linenumber=False, diffopts=None):
926 '''returns a list of tuples of ((ctx, number), line) for each line
926 '''returns a list of tuples of ((ctx, number), line) for each line
927 in the file, where ctx is the filectx of the node where
927 in the file, where ctx is the filectx of the node where
928 that line was last changed; if the linenumber parameter is true, number is
928 that line was last changed; if the linenumber parameter is true, number is
929 the line number at its first appearance in the managed file; otherwise,
929 the line number at its first appearance in the managed file; otherwise,
930 number has a fixed value of False.
930 number has a fixed value of False.
931 '''
931 '''
932
932
933 def lines(text):
933 def lines(text):
934 if text.endswith("\n"):
934 if text.endswith("\n"):
935 return text.count("\n")
935 return text.count("\n")
936 return text.count("\n") + int(bool(text))
936 return text.count("\n") + int(bool(text))
937
937
938 if linenumber:
938 if linenumber:
939 def decorate(text, rev):
939 def decorate(text, rev):
940 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
940 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
941 else:
941 else:
942 def decorate(text, rev):
942 def decorate(text, rev):
943 return ([(rev, False)] * lines(text), text)
943 return ([(rev, False)] * lines(text), text)
944
944
945 def pair(parent, child):
945 def pair(parent, child):
946 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
946 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
947 for (a1, a2, b1, b2), t in blocks:
947 for (a1, a2, b1, b2), t in blocks:
948 # Changed blocks ('!') or blocks made only of blank lines ('~')
948 # Changed blocks ('!') or blocks made only of blank lines ('~')
949 # belong to the child.
949 # belong to the child.
950 if t == '=':
950 if t == '=':
951 child[0][b1:b2] = parent[0][a1:a2]
951 child[0][b1:b2] = parent[0][a1:a2]
952 return child
952 return child
953
953
954 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
954 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
955
955
956 def parents(f):
956 def parents(f):
957 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
957 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
958 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
958 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
959 # from the topmost introrev (= srcrev) down to p.linkrev() if it
959 # from the topmost introrev (= srcrev) down to p.linkrev() if it
960 # isn't an ancestor of the srcrev.
960 # isn't an ancestor of the srcrev.
961 f._changeid
961 f._changeid
962 pl = f.parents()
962 pl = f.parents()
963
963
964 # Don't return renamed parents if we aren't following.
964 # Don't return renamed parents if we aren't following.
965 if not follow:
965 if not follow:
966 pl = [p for p in pl if p.path() == f.path()]
966 pl = [p for p in pl if p.path() == f.path()]
967
967
968 # renamed filectx won't have a filelog yet, so set it
968 # renamed filectx won't have a filelog yet, so set it
969 # from the cache to save time
969 # from the cache to save time
970 for p in pl:
970 for p in pl:
971 if '_filelog' not in p.__dict__:
971 if '_filelog' not in p.__dict__:
972 p._filelog = getlog(p.path())
972 p._filelog = getlog(p.path())
973
973
974 return pl
974 return pl
975
975
976 # use linkrev to find the first changeset where self appeared
976 # use linkrev to find the first changeset where self appeared
977 base = self
977 base = self
978 introrev = self.introrev()
978 introrev = self.introrev()
979 if self.rev() != introrev:
979 if self.rev() != introrev:
980 base = self.filectx(self.filenode(), changeid=introrev)
980 base = self.filectx(self.filenode(), changeid=introrev)
981 if getattr(base, '_ancestrycontext', None) is None:
981 if getattr(base, '_ancestrycontext', None) is None:
982 cl = self._repo.changelog
982 cl = self._repo.changelog
983 if introrev is None:
983 if introrev is None:
984 # wctx is not inclusive, but works because _ancestrycontext
984 # wctx is not inclusive, but works because _ancestrycontext
985 # is used to test filelog revisions
985 # is used to test filelog revisions
986 ac = cl.ancestors([p.rev() for p in base.parents()],
986 ac = cl.ancestors([p.rev() for p in base.parents()],
987 inclusive=True)
987 inclusive=True)
988 else:
988 else:
989 ac = cl.ancestors([introrev], inclusive=True)
989 ac = cl.ancestors([introrev], inclusive=True)
990 base._ancestrycontext = ac
990 base._ancestrycontext = ac
991
991
992 # This algorithm would prefer to be recursive, but Python is a
992 # This algorithm would prefer to be recursive, but Python is a
993 # bit recursion-hostile. Instead we do an iterative
993 # bit recursion-hostile. Instead we do an iterative
994 # depth-first search.
994 # depth-first search.
995
995
996 # 1st DFS pre-calculates pcache and needed
996 # 1st DFS pre-calculates pcache and needed
997 visit = [base]
997 visit = [base]
998 pcache = {}
998 pcache = {}
999 needed = {base: 1}
999 needed = {base: 1}
1000 while visit:
1000 while visit:
1001 f = visit.pop()
1001 f = visit.pop()
1002 if f in pcache:
1002 if f in pcache:
1003 continue
1003 continue
1004 pl = parents(f)
1004 pl = parents(f)
1005 pcache[f] = pl
1005 pcache[f] = pl
1006 for p in pl:
1006 for p in pl:
1007 needed[p] = needed.get(p, 0) + 1
1007 needed[p] = needed.get(p, 0) + 1
1008 if p not in pcache:
1008 if p not in pcache:
1009 visit.append(p)
1009 visit.append(p)
1010
1010
1011 # 2nd DFS does the actual annotate
1011 # 2nd DFS does the actual annotate
1012 visit[:] = [base]
1012 visit[:] = [base]
1013 hist = {}
1013 hist = {}
1014 while visit:
1014 while visit:
1015 f = visit[-1]
1015 f = visit[-1]
1016 if f in hist:
1016 if f in hist:
1017 visit.pop()
1017 visit.pop()
1018 continue
1018 continue
1019
1019
1020 ready = True
1020 ready = True
1021 pl = pcache[f]
1021 pl = pcache[f]
1022 for p in pl:
1022 for p in pl:
1023 if p not in hist:
1023 if p not in hist:
1024 ready = False
1024 ready = False
1025 visit.append(p)
1025 visit.append(p)
1026 if ready:
1026 if ready:
1027 visit.pop()
1027 visit.pop()
1028 curr = decorate(f.data(), f)
1028 curr = decorate(f.data(), f)
1029 for p in pl:
1029 for p in pl:
1030 curr = pair(hist[p], curr)
1030 curr = pair(hist[p], curr)
1031 if needed[p] == 1:
1031 if needed[p] == 1:
1032 del hist[p]
1032 del hist[p]
1033 del needed[p]
1033 del needed[p]
1034 else:
1034 else:
1035 needed[p] -= 1
1035 needed[p] -= 1
1036
1036
1037 hist[f] = curr
1037 hist[f] = curr
1038 del pcache[f]
1038 del pcache[f]
1039
1039
1040 return zip(hist[base][0], hist[base][1].splitlines(True))
1040 return zip(hist[base][0], hist[base][1].splitlines(True))
1041
1041
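    # A minimal usage sketch for annotate() (assumptions: 'repo' is an open
    # localrepo and 'somefile' is a tracked path; both names are hypothetical):
    #
    #   fctx = repo['.']['somefile']
    #   for (afctx, lineno), line in fctx.annotate(linenumber=True):
    #       print('%d:%d: %s' % (afctx.rev(), lineno, line.rstrip('\n')))
    #
    # Each returned pair attributes a line of the file's current content to the
    # filectx that last changed it, as described in the docstring above.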
1042 def ancestors(self, followfirst=False):
1042 def ancestors(self, followfirst=False):
1043 visit = {}
1043 visit = {}
1044 c = self
1044 c = self
1045 if followfirst:
1045 if followfirst:
1046 cut = 1
1046 cut = 1
1047 else:
1047 else:
1048 cut = None
1048 cut = None
1049
1049
1050 while True:
1050 while True:
1051 for parent in c.parents()[:cut]:
1051 for parent in c.parents()[:cut]:
1052 visit[(parent.linkrev(), parent.filenode())] = parent
1052 visit[(parent.linkrev(), parent.filenode())] = parent
1053 if not visit:
1053 if not visit:
1054 break
1054 break
1055 c = visit.pop(max(visit))
1055 c = visit.pop(max(visit))
1056 yield c
1056 yield c
1057
1057
1058 class filectx(basefilectx):
1058 class filectx(basefilectx):
1059 """A filecontext object makes access to data related to a particular
1059 """A filecontext object makes access to data related to a particular
1060 filerevision convenient."""
1060 filerevision convenient."""
1061 def __init__(self, repo, path, changeid=None, fileid=None,
1061 def __init__(self, repo, path, changeid=None, fileid=None,
1062 filelog=None, changectx=None):
1062 filelog=None, changectx=None):
1063 """changeid can be a changeset revision, node, or tag.
1063 """changeid can be a changeset revision, node, or tag.
1064 fileid can be a file revision or node."""
1064 fileid can be a file revision or node."""
1065 self._repo = repo
1065 self._repo = repo
1066 self._path = path
1066 self._path = path
1067
1067
1068 assert (changeid is not None
1068 assert (changeid is not None
1069 or fileid is not None
1069 or fileid is not None
1070 or changectx is not None), \
1070 or changectx is not None), \
1071 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1071 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1072 % (changeid, fileid, changectx))
1072 % (changeid, fileid, changectx))
1073
1073
1074 if filelog is not None:
1074 if filelog is not None:
1075 self._filelog = filelog
1075 self._filelog = filelog
1076
1076
1077 if changeid is not None:
1077 if changeid is not None:
1078 self._changeid = changeid
1078 self._changeid = changeid
1079 if changectx is not None:
1079 if changectx is not None:
1080 self._changectx = changectx
1080 self._changectx = changectx
1081 if fileid is not None:
1081 if fileid is not None:
1082 self._fileid = fileid
1082 self._fileid = fileid
1083
1083
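    # Construction sketch (hypothetical arguments; 'repo' is assumed to be an
    # open localrepo with a tracked 'README'):
    #
    #   filectx(repo, 'README', changeid='tip')   # the file as of the tip changeset
    #   filectx(repo, 'README', fileid=0)         # the first revision in the filelog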
1084 @propertycache
1084 @propertycache
1085 def _changectx(self):
1085 def _changectx(self):
1086 try:
1086 try:
1087 return changectx(self._repo, self._changeid)
1087 return changectx(self._repo, self._changeid)
1088 except error.FilteredRepoLookupError:
1088 except error.FilteredRepoLookupError:
1089 # Linkrev may point to any revision in the repository. When the
1089 # Linkrev may point to any revision in the repository. When the
1090 # repository is filtered, this may lead to `filectx` trying to build
1090 # repository is filtered, this may lead to `filectx` trying to build
1091 # `changectx` for a filtered revision. In such a case we fall back to
1091 # `changectx` for a filtered revision. In such a case we fall back to
1092 # creating `changectx` on the unfiltered version of the repository.
1092 # creating `changectx` on the unfiltered version of the repository.
1093 # This fallback should not be an issue because `changectx` from
1093 # This fallback should not be an issue because `changectx` from
1094 # `filectx` are not used in complex operations that care about
1094 # `filectx` are not used in complex operations that care about
1095 # filtering.
1095 # filtering.
1096 #
1096 #
1097 # This fallback is a cheap and dirty fix that prevents several
1097 # This fallback is a cheap and dirty fix that prevents several
1098 # crashes. It does not ensure the behavior is correct. However, the
1098 # crashes. It does not ensure the behavior is correct. However, the
1099 # behavior was not correct before filtering either, and "incorrect
1099 # behavior was not correct before filtering either, and "incorrect
1100 # behavior" is seen as better than "crash"
1100 # behavior" is seen as better than "crash"
1101 #
1101 #
1102 # Linkrevs have several serious problems with filtering that are
1102 # Linkrevs have several serious problems with filtering that are
1103 # complicated to solve. Proper handling of the issue here should be
1103 # complicated to solve. Proper handling of the issue here should be
1104 # considered when solving the linkrev issues is on the table.
1104 # considered when solving the linkrev issues is on the table.
1105 return changectx(self._repo.unfiltered(), self._changeid)
1105 return changectx(self._repo.unfiltered(), self._changeid)
1106
1106
1107 def filectx(self, fileid, changeid=None):
1107 def filectx(self, fileid, changeid=None):
1108 '''opens an arbitrary revision of the file without
1108 '''opens an arbitrary revision of the file without
1109 opening a new filelog'''
1109 opening a new filelog'''
1110 return filectx(self._repo, self._path, fileid=fileid,
1110 return filectx(self._repo, self._path, fileid=fileid,
1111 filelog=self._filelog, changeid=changeid)
1111 filelog=self._filelog, changeid=changeid)
1112
1112
1113 def data(self):
1113 def data(self):
1114 try:
1114 try:
1115 return self._filelog.read(self._filenode)
1115 return self._filelog.read(self._filenode)
1116 except error.CensoredNodeError:
1116 except error.CensoredNodeError:
1117 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1117 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1118 return ""
1118 return ""
1119 raise error.Abort(_("censored node: %s") % short(self._filenode),
1119 raise error.Abort(_("censored node: %s") % short(self._filenode),
1120 hint=_("set censor.policy to ignore errors"))
1120 hint=_("set censor.policy to ignore errors"))
1121
1121
1122 def size(self):
1122 def size(self):
1123 return self._filelog.size(self._filerev)
1123 return self._filelog.size(self._filerev)
1124
1124
1125 def renamed(self):
1125 def renamed(self):
1126 """check if file was actually renamed in this changeset revision
1126 """check if file was actually renamed in this changeset revision
1127
1127
1128 If a rename is logged in the file revision, we report the copy for the
1128 If a rename is logged in the file revision, we report the copy for the
1129 changeset only if the file revision's linkrev points back to the changeset
1129 changeset only if the file revision's linkrev points back to the changeset
1130 in question or both changeset parents contain different file revisions.
1130 in question or both changeset parents contain different file revisions.
1131 """
1131 """
1132
1132
1133 renamed = self._filelog.renamed(self._filenode)
1133 renamed = self._filelog.renamed(self._filenode)
1134 if not renamed:
1134 if not renamed:
1135 return renamed
1135 return renamed
1136
1136
1137 if self.rev() == self.linkrev():
1137 if self.rev() == self.linkrev():
1138 return renamed
1138 return renamed
1139
1139
1140 name = self.path()
1140 name = self.path()
1141 fnode = self._filenode
1141 fnode = self._filenode
1142 for p in self._changectx.parents():
1142 for p in self._changectx.parents():
1143 try:
1143 try:
1144 if fnode == p.filenode(name):
1144 if fnode == p.filenode(name):
1145 return None
1145 return None
1146 except error.LookupError:
1146 except error.LookupError:
1147 pass
1147 pass
1148 return renamed
1148 return renamed
1149
1149
1150 def children(self):
1150 def children(self):
1151 # hard for renames
1151 # hard for renames
1152 c = self._filelog.children(self._filenode)
1152 c = self._filelog.children(self._filenode)
1153 return [filectx(self._repo, self._path, fileid=x,
1153 return [filectx(self._repo, self._path, fileid=x,
1154 filelog=self._filelog) for x in c]
1154 filelog=self._filelog) for x in c]
1155
1155
1156 def blockancestors(fctx, fromline, toline):
1157 """Yield ancestors of `fctx` with respect to the block of lines within
1158 `fromline`-`toline` range.
1159 """
1160 def changesrange(fctx1, fctx2, linerange2):
1161 """Return `(diffinrange, linerange1)` where `diffinrange` is True
1162 if diff from fctx2 to fctx1 has changes in linerange2 and
1163 `linerange1` is the new line range for fctx1.
1164 """
1165 diffopts = patch.diffopts(fctx._repo.ui)
1166 blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts)
1167 filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2)
1168 diffinrange = any(stype == '!' for _, stype in filteredblocks)
1169 return diffinrange, linerange1
1170
1171 visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))}
1172 while visit:
1173 c, linerange2 = visit.pop(max(visit))
1174 pl = c.parents()
1175 if not pl:
1176 # The block originates from the initial revision.
1177 yield c
1178 continue
1179 inrange = False
1180 for p in pl:
1181 inrangep, linerange1 = changesrange(p, c, linerange2)
1182 inrange = inrange or inrangep
1183 if linerange1[0] == linerange1[1]:
1184 # Parent's linerange is empty, meaning that the block got
1185 # introduced in this revision; no need to go further in this
1186 # branch.
1187 continue
1188 visit[p.linkrev(), p.filenode()] = p, linerange1
1189 if inrange:
1190 yield c
1191
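# A usage sketch for blockancestors() (hypothetical names; 'repo' is assumed to
# be an open localrepo and 'somefile' a tracked path; fromline/toline delimit
# the block of interest as in the signature above):
#
#   fctx = repo['.']['somefile']
#   for afctx in blockancestors(fctx, fromline, toline):
#       print(afctx.rev())
#
# Only ancestors whose diff touches the given line range are yielded, and since
# pending revisions are popped by highest linkrev first, results come out
# roughly newest-first.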
1156 class committablectx(basectx):
1192 class committablectx(basectx):
1157 """A committablectx object provides common functionality for a context that
1193 """A committablectx object provides common functionality for a context that
1158 wants the ability to commit, e.g. workingctx or memctx."""
1194 wants the ability to commit, e.g. workingctx or memctx."""
1159 def __init__(self, repo, text="", user=None, date=None, extra=None,
1195 def __init__(self, repo, text="", user=None, date=None, extra=None,
1160 changes=None):
1196 changes=None):
1161 self._repo = repo
1197 self._repo = repo
1162 self._rev = None
1198 self._rev = None
1163 self._node = None
1199 self._node = None
1164 self._text = text
1200 self._text = text
1165 if date:
1201 if date:
1166 self._date = util.parsedate(date)
1202 self._date = util.parsedate(date)
1167 if user:
1203 if user:
1168 self._user = user
1204 self._user = user
1169 if changes:
1205 if changes:
1170 self._status = changes
1206 self._status = changes
1171
1207
1172 self._extra = {}
1208 self._extra = {}
1173 if extra:
1209 if extra:
1174 self._extra = extra.copy()
1210 self._extra = extra.copy()
1175 if 'branch' not in self._extra:
1211 if 'branch' not in self._extra:
1176 try:
1212 try:
1177 branch = encoding.fromlocal(self._repo.dirstate.branch())
1213 branch = encoding.fromlocal(self._repo.dirstate.branch())
1178 except UnicodeDecodeError:
1214 except UnicodeDecodeError:
1179 raise error.Abort(_('branch name not in UTF-8!'))
1215 raise error.Abort(_('branch name not in UTF-8!'))
1180 self._extra['branch'] = branch
1216 self._extra['branch'] = branch
1181 if self._extra['branch'] == '':
1217 if self._extra['branch'] == '':
1182 self._extra['branch'] = 'default'
1218 self._extra['branch'] = 'default'
1183
1219
1184 def __str__(self):
1220 def __str__(self):
1185 return str(self._parents[0]) + "+"
1221 return str(self._parents[0]) + "+"
1186
1222
1187 def __nonzero__(self):
1223 def __nonzero__(self):
1188 return True
1224 return True
1189
1225
1190 def _buildflagfunc(self):
1226 def _buildflagfunc(self):
1191 # Create a fallback function for getting file flags when the
1227 # Create a fallback function for getting file flags when the
1192 # filesystem doesn't support them
1228 # filesystem doesn't support them
1193
1229
1194 copiesget = self._repo.dirstate.copies().get
1230 copiesget = self._repo.dirstate.copies().get
1195 parents = self.parents()
1231 parents = self.parents()
1196 if len(parents) < 2:
1232 if len(parents) < 2:
1197 # when we have one parent, it's easy: copy from parent
1233 # when we have one parent, it's easy: copy from parent
1198 man = parents[0].manifest()
1234 man = parents[0].manifest()
1199 def func(f):
1235 def func(f):
1200 f = copiesget(f, f)
1236 f = copiesget(f, f)
1201 return man.flags(f)
1237 return man.flags(f)
1202 else:
1238 else:
1203 # merges are tricky: we try to reconstruct the unstored
1239 # merges are tricky: we try to reconstruct the unstored
1204 # result from the merge (issue1802)
1240 # result from the merge (issue1802)
1205 p1, p2 = parents
1241 p1, p2 = parents
1206 pa = p1.ancestor(p2)
1242 pa = p1.ancestor(p2)
1207 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1243 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1208
1244
1209 def func(f):
1245 def func(f):
1210 f = copiesget(f, f) # may be wrong for merges with copies
1246 f = copiesget(f, f) # may be wrong for merges with copies
1211 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1247 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1212 if fl1 == fl2:
1248 if fl1 == fl2:
1213 return fl1
1249 return fl1
1214 if fl1 == fla:
1250 if fl1 == fla:
1215 return fl2
1251 return fl2
1216 if fl2 == fla:
1252 if fl2 == fla:
1217 return fl1
1253 return fl1
1218 return '' # punt for conflicts
1254 return '' # punt for conflicts
1219
1255
1220 return func
1256 return func
1221
1257
1222 @propertycache
1258 @propertycache
1223 def _flagfunc(self):
1259 def _flagfunc(self):
1224 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1260 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1225
1261
1226 @propertycache
1262 @propertycache
1227 def _manifest(self):
1263 def _manifest(self):
1228 """generate a manifest corresponding to the values in self._status
1264 """generate a manifest corresponding to the values in self._status
1229
1265
1230 This reuses the file nodeids from the parent, but replaces the entry for
1266 This reuses the file nodeids from the parent, but replaces the entry for
1231 each modified or added file with a sentinel nodeid (modifiednodeid or
1267 each modified or added file with a sentinel nodeid (modifiednodeid or
1232 addednodeid). This is used by manifest merge to see that files
1268 addednodeid). This is used by manifest merge to see that files
1233 are different and by update logic to avoid deleting newly added files.
1269 are different and by update logic to avoid deleting newly added files.
1234 """
1270 """
1235 parents = self.parents()
1271 parents = self.parents()
1236
1272
1237 man = parents[0].manifest().copy()
1273 man = parents[0].manifest().copy()
1238
1274
1239 ff = self._flagfunc
1275 ff = self._flagfunc
1240 for i, l in ((addednodeid, self._status.added),
1276 for i, l in ((addednodeid, self._status.added),
1241 (modifiednodeid, self._status.modified)):
1277 (modifiednodeid, self._status.modified)):
1242 for f in l:
1278 for f in l:
1243 man[f] = i
1279 man[f] = i
1244 try:
1280 try:
1245 man.setflag(f, ff(f))
1281 man.setflag(f, ff(f))
1246 except OSError:
1282 except OSError:
1247 pass
1283 pass
1248
1284
1249 for f in self._status.deleted + self._status.removed:
1285 for f in self._status.deleted + self._status.removed:
1250 if f in man:
1286 if f in man:
1251 del man[f]
1287 del man[f]
1252
1288
1253 return man
1289 return man
1254
1290
1255 @propertycache
1291 @propertycache
1256 def _status(self):
1292 def _status(self):
1257 return self._repo.status()
1293 return self._repo.status()
1258
1294
1259 @propertycache
1295 @propertycache
1260 def _user(self):
1296 def _user(self):
1261 return self._repo.ui.username()
1297 return self._repo.ui.username()
1262
1298
1263 @propertycache
1299 @propertycache
1264 def _date(self):
1300 def _date(self):
1265 return util.makedate()
1301 return util.makedate()
1266
1302
1267 def subrev(self, subpath):
1303 def subrev(self, subpath):
1268 return None
1304 return None
1269
1305
1270 def manifestnode(self):
1306 def manifestnode(self):
1271 return None
1307 return None
1272 def user(self):
1308 def user(self):
1273 return self._user or self._repo.ui.username()
1309 return self._user or self._repo.ui.username()
1274 def date(self):
1310 def date(self):
1275 return self._date
1311 return self._date
1276 def description(self):
1312 def description(self):
1277 return self._text
1313 return self._text
1278 def files(self):
1314 def files(self):
1279 return sorted(self._status.modified + self._status.added +
1315 return sorted(self._status.modified + self._status.added +
1280 self._status.removed)
1316 self._status.removed)
1281
1317
1282 def modified(self):
1318 def modified(self):
1283 return self._status.modified
1319 return self._status.modified
1284 def added(self):
1320 def added(self):
1285 return self._status.added
1321 return self._status.added
1286 def removed(self):
1322 def removed(self):
1287 return self._status.removed
1323 return self._status.removed
1288 def deleted(self):
1324 def deleted(self):
1289 return self._status.deleted
1325 return self._status.deleted
1290 def branch(self):
1326 def branch(self):
1291 return encoding.tolocal(self._extra['branch'])
1327 return encoding.tolocal(self._extra['branch'])
1292 def closesbranch(self):
1328 def closesbranch(self):
1293 return 'close' in self._extra
1329 return 'close' in self._extra
1294 def extra(self):
1330 def extra(self):
1295 return self._extra
1331 return self._extra
1296
1332
1297 def tags(self):
1333 def tags(self):
1298 return []
1334 return []
1299
1335
1300 def bookmarks(self):
1336 def bookmarks(self):
1301 b = []
1337 b = []
1302 for p in self.parents():
1338 for p in self.parents():
1303 b.extend(p.bookmarks())
1339 b.extend(p.bookmarks())
1304 return b
1340 return b
1305
1341
1306 def phase(self):
1342 def phase(self):
1307 phase = phases.draft # default phase to draft
1343 phase = phases.draft # default phase to draft
1308 for p in self.parents():
1344 for p in self.parents():
1309 phase = max(phase, p.phase())
1345 phase = max(phase, p.phase())
1310 return phase
1346 return phase
1311
1347
1312 def hidden(self):
1348 def hidden(self):
1313 return False
1349 return False
1314
1350
1315 def children(self):
1351 def children(self):
1316 return []
1352 return []
1317
1353
1318 def flags(self, path):
1354 def flags(self, path):
1319 if '_manifest' in self.__dict__:
1355 if '_manifest' in self.__dict__:
1320 try:
1356 try:
1321 return self._manifest.flags(path)
1357 return self._manifest.flags(path)
1322 except KeyError:
1358 except KeyError:
1323 return ''
1359 return ''
1324
1360
1325 try:
1361 try:
1326 return self._flagfunc(path)
1362 return self._flagfunc(path)
1327 except OSError:
1363 except OSError:
1328 return ''
1364 return ''
1329
1365
1330 def ancestor(self, c2):
1366 def ancestor(self, c2):
1331 """return the "best" ancestor context of self and c2"""
1367 """return the "best" ancestor context of self and c2"""
1332 return self._parents[0].ancestor(c2) # punt on two parents for now
1368 return self._parents[0].ancestor(c2) # punt on two parents for now
1333
1369
1334 def walk(self, match):
1370 def walk(self, match):
1335 '''Generates matching file names.'''
1371 '''Generates matching file names.'''
1336 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1372 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1337 True, False))
1373 True, False))
1338
1374
1339 def matches(self, match):
1375 def matches(self, match):
1340 return sorted(self._repo.dirstate.matches(match))
1376 return sorted(self._repo.dirstate.matches(match))
1341
1377
1342 def ancestors(self):
1378 def ancestors(self):
1343 for p in self._parents:
1379 for p in self._parents:
1344 yield p
1380 yield p
1345 for a in self._repo.changelog.ancestors(
1381 for a in self._repo.changelog.ancestors(
1346 [p.rev() for p in self._parents]):
1382 [p.rev() for p in self._parents]):
1347 yield changectx(self._repo, a)
1383 yield changectx(self._repo, a)
1348
1384
1349 def markcommitted(self, node):
1385 def markcommitted(self, node):
1350 """Perform post-commit cleanup necessary after committing this ctx
1386 """Perform post-commit cleanup necessary after committing this ctx
1351
1387
1352 Specifically, this updates the backing stores that this working context
1388 Specifically, this updates the backing stores that this working context
1353 wraps to reflect the fact that the changes reflected by this
1389 wraps to reflect the fact that the changes reflected by this
1354 workingctx have been committed. For example, it marks
1390 workingctx have been committed. For example, it marks
1355 modified and added files as normal in the dirstate.
1391 modified and added files as normal in the dirstate.
1356
1392
1357 """
1393 """
1358
1394
1359 self._repo.dirstate.beginparentchange()
1395 self._repo.dirstate.beginparentchange()
1360 for f in self.modified() + self.added():
1396 for f in self.modified() + self.added():
1361 self._repo.dirstate.normal(f)
1397 self._repo.dirstate.normal(f)
1362 for f in self.removed():
1398 for f in self.removed():
1363 self._repo.dirstate.drop(f)
1399 self._repo.dirstate.drop(f)
1364 self._repo.dirstate.setparents(node)
1400 self._repo.dirstate.setparents(node)
1365 self._repo.dirstate.endparentchange()
1401 self._repo.dirstate.endparentchange()
1366
1402
1367 # write changes out explicitly, because nesting wlock at
1403 # write changes out explicitly, because nesting wlock at
1368 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1404 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1369 # from immediately doing so for subsequent changing files
1405 # from immediately doing so for subsequent changing files
1370 self._repo.dirstate.write(self._repo.currenttransaction())
1406 self._repo.dirstate.write(self._repo.currenttransaction())
1371
1407
1372 class workingctx(committablectx):
1408 class workingctx(committablectx):
1373 """A workingctx object makes access to data related to
1409 """A workingctx object makes access to data related to
1374 the current working directory convenient.
1410 the current working directory convenient.
1375 date - any valid date string or (unixtime, offset), or None.
1411 date - any valid date string or (unixtime, offset), or None.
1376 user - username string, or None.
1412 user - username string, or None.
1377 extra - a dictionary of extra values, or None.
1413 extra - a dictionary of extra values, or None.
1378 changes - a list of file lists as returned by localrepo.status()
1414 changes - a list of file lists as returned by localrepo.status()
1379 or None to use the repository status.
1415 or None to use the repository status.
1380 """
1416 """
1381 def __init__(self, repo, text="", user=None, date=None, extra=None,
1417 def __init__(self, repo, text="", user=None, date=None, extra=None,
1382 changes=None):
1418 changes=None):
1383 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1419 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1384
1420
1385 def __iter__(self):
1421 def __iter__(self):
1386 d = self._repo.dirstate
1422 d = self._repo.dirstate
1387 for f in d:
1423 for f in d:
1388 if d[f] != 'r':
1424 if d[f] != 'r':
1389 yield f
1425 yield f
1390
1426
1391 def __contains__(self, key):
1427 def __contains__(self, key):
1392 return self._repo.dirstate[key] not in "?r"
1428 return self._repo.dirstate[key] not in "?r"
1393
1429
1394 def hex(self):
1430 def hex(self):
1395 return hex(wdirid)
1431 return hex(wdirid)
1396
1432
1397 @propertycache
1433 @propertycache
1398 def _parents(self):
1434 def _parents(self):
1399 p = self._repo.dirstate.parents()
1435 p = self._repo.dirstate.parents()
1400 if p[1] == nullid:
1436 if p[1] == nullid:
1401 p = p[:-1]
1437 p = p[:-1]
1402 return [changectx(self._repo, x) for x in p]
1438 return [changectx(self._repo, x) for x in p]
1403
1439
1404 def filectx(self, path, filelog=None):
1440 def filectx(self, path, filelog=None):
1405 """get a file context from the working directory"""
1441 """get a file context from the working directory"""
1406 return workingfilectx(self._repo, path, workingctx=self,
1442 return workingfilectx(self._repo, path, workingctx=self,
1407 filelog=filelog)
1443 filelog=filelog)
1408
1444
1409 def dirty(self, missing=False, merge=True, branch=True):
1445 def dirty(self, missing=False, merge=True, branch=True):
1410 "check whether a working directory is modified"
1446 "check whether a working directory is modified"
1411 # check subrepos first
1447 # check subrepos first
1412 for s in sorted(self.substate):
1448 for s in sorted(self.substate):
1413 if self.sub(s).dirty():
1449 if self.sub(s).dirty():
1414 return True
1450 return True
1415 # check current working dir
1451 # check current working dir
1416 return ((merge and self.p2()) or
1452 return ((merge and self.p2()) or
1417 (branch and self.branch() != self.p1().branch()) or
1453 (branch and self.branch() != self.p1().branch()) or
1418 self.modified() or self.added() or self.removed() or
1454 self.modified() or self.added() or self.removed() or
1419 (missing and self.deleted()))
1455 (missing and self.deleted()))
1420
1456
1421 def add(self, list, prefix=""):
1457 def add(self, list, prefix=""):
1422 join = lambda f: os.path.join(prefix, f)
1458 join = lambda f: os.path.join(prefix, f)
1423 with self._repo.wlock():
1459 with self._repo.wlock():
1424 ui, ds = self._repo.ui, self._repo.dirstate
1460 ui, ds = self._repo.ui, self._repo.dirstate
1425 rejected = []
1461 rejected = []
1426 lstat = self._repo.wvfs.lstat
1462 lstat = self._repo.wvfs.lstat
1427 for f in list:
1463 for f in list:
1428 scmutil.checkportable(ui, join(f))
1464 scmutil.checkportable(ui, join(f))
1429 try:
1465 try:
1430 st = lstat(f)
1466 st = lstat(f)
1431 except OSError:
1467 except OSError:
1432 ui.warn(_("%s does not exist!\n") % join(f))
1468 ui.warn(_("%s does not exist!\n") % join(f))
1433 rejected.append(f)
1469 rejected.append(f)
1434 continue
1470 continue
1435 if st.st_size > 10000000:
1471 if st.st_size > 10000000:
1436 ui.warn(_("%s: up to %d MB of RAM may be required "
1472 ui.warn(_("%s: up to %d MB of RAM may be required "
1437 "to manage this file\n"
1473 "to manage this file\n"
1438 "(use 'hg revert %s' to cancel the "
1474 "(use 'hg revert %s' to cancel the "
1439 "pending addition)\n")
1475 "pending addition)\n")
1440 % (f, 3 * st.st_size // 1000000, join(f)))
1476 % (f, 3 * st.st_size // 1000000, join(f)))
1441 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1477 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1442 ui.warn(_("%s not added: only files and symlinks "
1478 ui.warn(_("%s not added: only files and symlinks "
1443 "supported currently\n") % join(f))
1479 "supported currently\n") % join(f))
1444 rejected.append(f)
1480 rejected.append(f)
1445 elif ds[f] in 'amn':
1481 elif ds[f] in 'amn':
1446 ui.warn(_("%s already tracked!\n") % join(f))
1482 ui.warn(_("%s already tracked!\n") % join(f))
1447 elif ds[f] == 'r':
1483 elif ds[f] == 'r':
1448 ds.normallookup(f)
1484 ds.normallookup(f)
1449 else:
1485 else:
1450 ds.add(f)
1486 ds.add(f)
1451 return rejected
1487 return rejected
1452
1488
1453 def forget(self, files, prefix=""):
1489 def forget(self, files, prefix=""):
1454 join = lambda f: os.path.join(prefix, f)
1490 join = lambda f: os.path.join(prefix, f)
1455 with self._repo.wlock():
1491 with self._repo.wlock():
1456 rejected = []
1492 rejected = []
1457 for f in files:
1493 for f in files:
1458 if f not in self._repo.dirstate:
1494 if f not in self._repo.dirstate:
1459 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1495 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1460 rejected.append(f)
1496 rejected.append(f)
1461 elif self._repo.dirstate[f] != 'a':
1497 elif self._repo.dirstate[f] != 'a':
1462 self._repo.dirstate.remove(f)
1498 self._repo.dirstate.remove(f)
1463 else:
1499 else:
1464 self._repo.dirstate.drop(f)
1500 self._repo.dirstate.drop(f)
1465 return rejected
1501 return rejected
1466
1502
1467 def undelete(self, list):
1503 def undelete(self, list):
1468 pctxs = self.parents()
1504 pctxs = self.parents()
1469 with self._repo.wlock():
1505 with self._repo.wlock():
1470 for f in list:
1506 for f in list:
1471 if self._repo.dirstate[f] != 'r':
1507 if self._repo.dirstate[f] != 'r':
1472 self._repo.ui.warn(_("%s not removed!\n") % f)
1508 self._repo.ui.warn(_("%s not removed!\n") % f)
1473 else:
1509 else:
1474 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1510 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1475 t = fctx.data()
1511 t = fctx.data()
1476 self._repo.wwrite(f, t, fctx.flags())
1512 self._repo.wwrite(f, t, fctx.flags())
1477 self._repo.dirstate.normal(f)
1513 self._repo.dirstate.normal(f)
1478
1514
1479 def copy(self, source, dest):
1515 def copy(self, source, dest):
1480 try:
1516 try:
1481 st = self._repo.wvfs.lstat(dest)
1517 st = self._repo.wvfs.lstat(dest)
1482 except OSError as err:
1518 except OSError as err:
1483 if err.errno != errno.ENOENT:
1519 if err.errno != errno.ENOENT:
1484 raise
1520 raise
1485 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1521 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1486 return
1522 return
1487 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1523 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1488 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1524 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1489 "symbolic link\n") % dest)
1525 "symbolic link\n") % dest)
1490 else:
1526 else:
1491 with self._repo.wlock():
1527 with self._repo.wlock():
1492 if self._repo.dirstate[dest] in '?':
1528 if self._repo.dirstate[dest] in '?':
1493 self._repo.dirstate.add(dest)
1529 self._repo.dirstate.add(dest)
1494 elif self._repo.dirstate[dest] in 'r':
1530 elif self._repo.dirstate[dest] in 'r':
1495 self._repo.dirstate.normallookup(dest)
1531 self._repo.dirstate.normallookup(dest)
1496 self._repo.dirstate.copy(source, dest)
1532 self._repo.dirstate.copy(source, dest)
1497
1533
1498 def match(self, pats=[], include=None, exclude=None, default='glob',
1534 def match(self, pats=[], include=None, exclude=None, default='glob',
1499 listsubrepos=False, badfn=None):
1535 listsubrepos=False, badfn=None):
1500 r = self._repo
1536 r = self._repo
1501
1537
1502 # Only a case insensitive filesystem needs magic to translate user input
1538 # Only a case insensitive filesystem needs magic to translate user input
1503 # to actual case in the filesystem.
1539 # to actual case in the filesystem.
1504 if not util.fscasesensitive(r.root):
1540 if not util.fscasesensitive(r.root):
1505 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1541 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1506 exclude, default, r.auditor, self,
1542 exclude, default, r.auditor, self,
1507 listsubrepos=listsubrepos,
1543 listsubrepos=listsubrepos,
1508 badfn=badfn)
1544 badfn=badfn)
1509 return matchmod.match(r.root, r.getcwd(), pats,
1545 return matchmod.match(r.root, r.getcwd(), pats,
1510 include, exclude, default,
1546 include, exclude, default,
1511 auditor=r.auditor, ctx=self,
1547 auditor=r.auditor, ctx=self,
1512 listsubrepos=listsubrepos, badfn=badfn)
1548 listsubrepos=listsubrepos, badfn=badfn)
1513
1549
1514 def _filtersuspectsymlink(self, files):
1550 def _filtersuspectsymlink(self, files):
1515 if not files or self._repo.dirstate._checklink:
1551 if not files or self._repo.dirstate._checklink:
1516 return files
1552 return files
1517
1553
1518 # Symlink placeholders may get non-symlink-like contents
1554 # Symlink placeholders may get non-symlink-like contents
1519 # via user error or dereferencing by NFS or Samba servers,
1555 # via user error or dereferencing by NFS or Samba servers,
1520 # so we filter out any placeholders that don't look like a
1556 # so we filter out any placeholders that don't look like a
1521 # symlink
1557 # symlink
1522 sane = []
1558 sane = []
1523 for f in files:
1559 for f in files:
1524 if self.flags(f) == 'l':
1560 if self.flags(f) == 'l':
1525 d = self[f].data()
1561 d = self[f].data()
1526 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1562 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1527 self._repo.ui.debug('ignoring suspect symlink placeholder'
1563 self._repo.ui.debug('ignoring suspect symlink placeholder'
1528 ' "%s"\n' % f)
1564 ' "%s"\n' % f)
1529 continue
1565 continue
1530 sane.append(f)
1566 sane.append(f)
1531 return sane
1567 return sane
1532
1568
1533 def _checklookup(self, files):
1569 def _checklookup(self, files):
1534 # check for any possibly clean files
1570 # check for any possibly clean files
1535 if not files:
1571 if not files:
1536 return [], []
1572 return [], []
1537
1573
1538 modified = []
1574 modified = []
1539 fixup = []
1575 fixup = []
1540 pctx = self._parents[0]
1576 pctx = self._parents[0]
1541 # do a full compare of any files that might have changed
1577 # do a full compare of any files that might have changed
1542 for f in sorted(files):
1578 for f in sorted(files):
1543 if (f not in pctx or self.flags(f) != pctx.flags(f)
1579 if (f not in pctx or self.flags(f) != pctx.flags(f)
1544 or pctx[f].cmp(self[f])):
1580 or pctx[f].cmp(self[f])):
1545 modified.append(f)
1581 modified.append(f)
1546 else:
1582 else:
1547 fixup.append(f)
1583 fixup.append(f)
1548
1584
1549 # update dirstate for files that are actually clean
1585 # update dirstate for files that are actually clean
1550 if fixup:
1586 if fixup:
1551 try:
1587 try:
1552 # updating the dirstate is optional
1588 # updating the dirstate is optional
1553 # so we don't wait on the lock
1589 # so we don't wait on the lock
1554 # wlock can invalidate the dirstate, so cache normal _after_
1590 # wlock can invalidate the dirstate, so cache normal _after_
1555 # taking the lock
1591 # taking the lock
1556 with self._repo.wlock(False):
1592 with self._repo.wlock(False):
1557 normal = self._repo.dirstate.normal
1593 normal = self._repo.dirstate.normal
1558 for f in fixup:
1594 for f in fixup:
1559 normal(f)
1595 normal(f)
1560 # write changes out explicitly, because nesting
1596 # write changes out explicitly, because nesting
1561 # wlock at runtime may prevent 'wlock.release()'
1597 # wlock at runtime may prevent 'wlock.release()'
1562 # after this block from doing so for subsequent
1598 # after this block from doing so for subsequent
1563 # changing files
1599 # changing files
1564 self._repo.dirstate.write(self._repo.currenttransaction())
1600 self._repo.dirstate.write(self._repo.currenttransaction())
1565 except error.LockError:
1601 except error.LockError:
1566 pass
1602 pass
1567 return modified, fixup
1603 return modified, fixup
1568
1604
1569 def _manifestmatches(self, match, s):
1605 def _manifestmatches(self, match, s):
1570 """Slow path for workingctx
1606 """Slow path for workingctx
1571
1607
1572 The fast path is when we compare the working directory to its parent
1608 The fast path is when we compare the working directory to its parent
1573 which means this function is comparing with a non-parent; therefore we
1609 which means this function is comparing with a non-parent; therefore we
1574 need to build a manifest and return what matches.
1610 need to build a manifest and return what matches.
1575 """
1611 """
1576 mf = self._repo['.']._manifestmatches(match, s)
1612 mf = self._repo['.']._manifestmatches(match, s)
1577 for f in s.modified + s.added:
1613 for f in s.modified + s.added:
1578 mf[f] = newnodeid
1614 mf[f] = newnodeid
1579 mf.setflag(f, self.flags(f))
1615 mf.setflag(f, self.flags(f))
1580 for f in s.removed:
1616 for f in s.removed:
1581 if f in mf:
1617 if f in mf:
1582 del mf[f]
1618 del mf[f]
1583 return mf
1619 return mf
1584
1620
1585 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1621 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1586 unknown=False):
1622 unknown=False):
1587 '''Gets the status from the dirstate -- internal use only.'''
1623 '''Gets the status from the dirstate -- internal use only.'''
1588 listignored, listclean, listunknown = ignored, clean, unknown
1624 listignored, listclean, listunknown = ignored, clean, unknown
1589 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1625 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1590 subrepos = []
1626 subrepos = []
1591 if '.hgsub' in self:
1627 if '.hgsub' in self:
1592 subrepos = sorted(self.substate)
1628 subrepos = sorted(self.substate)
1593 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1629 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1594 listclean, listunknown)
1630 listclean, listunknown)
1595
1631
1596 # check for any possibly clean files
1632 # check for any possibly clean files
1597 if cmp:
1633 if cmp:
1598 modified2, fixup = self._checklookup(cmp)
1634 modified2, fixup = self._checklookup(cmp)
1599 s.modified.extend(modified2)
1635 s.modified.extend(modified2)
1600
1636
1601 # update dirstate for files that are actually clean
1637 # update dirstate for files that are actually clean
1602 if fixup and listclean:
1638 if fixup and listclean:
1603 s.clean.extend(fixup)
1639 s.clean.extend(fixup)
1604
1640
1605 if match.always():
1641 if match.always():
1606 # cache for performance
1642 # cache for performance
1607 if s.unknown or s.ignored or s.clean:
1643 if s.unknown or s.ignored or s.clean:
1608 # "_status" is cached with list*=False in the normal route
1644 # "_status" is cached with list*=False in the normal route
1609 self._status = scmutil.status(s.modified, s.added, s.removed,
1645 self._status = scmutil.status(s.modified, s.added, s.removed,
1610 s.deleted, [], [], [])
1646 s.deleted, [], [], [])
1611 else:
1647 else:
1612 self._status = s
1648 self._status = s
1613
1649
1614 return s
1650 return s
1615
1651
1616 def _buildstatus(self, other, s, match, listignored, listclean,
1652 def _buildstatus(self, other, s, match, listignored, listclean,
1617 listunknown):
1653 listunknown):
1618 """build a status with respect to another context
1654 """build a status with respect to another context
1619
1655
1620 This includes logic for maintaining the fast path of status when
1656 This includes logic for maintaining the fast path of status when
1621 comparing the working directory against its parent, which is to skip
1657 comparing the working directory against its parent, which is to skip
1622 building a new manifest if self (working directory) is not comparing
1658 building a new manifest if self (working directory) is not comparing
1623 against its parent (repo['.']).
1659 against its parent (repo['.']).
1624 """
1660 """
1625 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1661 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1626 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1662 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1627 # might have accidentally ended up with the entire contents of the file
1663 # might have accidentally ended up with the entire contents of the file
1628 # they are supposed to be linking to.
1664 # they are supposed to be linking to.
1629 s.modified[:] = self._filtersuspectsymlink(s.modified)
1665 s.modified[:] = self._filtersuspectsymlink(s.modified)
1630 if other != self._repo['.']:
1666 if other != self._repo['.']:
1631 s = super(workingctx, self)._buildstatus(other, s, match,
1667 s = super(workingctx, self)._buildstatus(other, s, match,
1632 listignored, listclean,
1668 listignored, listclean,
1633 listunknown)
1669 listunknown)
1634 return s
1670 return s
1635
1671
1636 def _matchstatus(self, other, match):
1672 def _matchstatus(self, other, match):
1637 """override the match method with a filter for directory patterns
1673 """override the match method with a filter for directory patterns
1638
1674
1639 We use inheritance to customize the match.bad method only in cases of
1675 We use inheritance to customize the match.bad method only in cases of
1640 workingctx since it belongs only to the working directory when
1676 workingctx since it belongs only to the working directory when
1641 comparing against the parent changeset.
1677 comparing against the parent changeset.
1642
1678
1643 If we aren't comparing against the working directory's parent, then we
1679 If we aren't comparing against the working directory's parent, then we
1644 just use the default match object sent to us.
1680 just use the default match object sent to us.
1645 """
1681 """
1646 superself = super(workingctx, self)
1682 superself = super(workingctx, self)
1647 match = superself._matchstatus(other, match)
1683 match = superself._matchstatus(other, match)
1648 if other != self._repo['.']:
1684 if other != self._repo['.']:
1649 def bad(f, msg):
1685 def bad(f, msg):
1650 # 'f' may be a directory pattern from 'match.files()',
1686 # 'f' may be a directory pattern from 'match.files()',
1651 # so 'f not in ctx1' is not enough
1687 # so 'f not in ctx1' is not enough
1652 if f not in other and not other.hasdir(f):
1688 if f not in other and not other.hasdir(f):
1653 self._repo.ui.warn('%s: %s\n' %
1689 self._repo.ui.warn('%s: %s\n' %
1654 (self._repo.dirstate.pathto(f), msg))
1690 (self._repo.dirstate.pathto(f), msg))
1655 match.bad = bad
1691 match.bad = bad
1656 return match
1692 return match
1657
1693
1658 class committablefilectx(basefilectx):
1694 class committablefilectx(basefilectx):
1659 """A committablefilectx provides common functionality for a file context
1695 """A committablefilectx provides common functionality for a file context
1660 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1696 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1661 def __init__(self, repo, path, filelog=None, ctx=None):
1697 def __init__(self, repo, path, filelog=None, ctx=None):
1662 self._repo = repo
1698 self._repo = repo
1663 self._path = path
1699 self._path = path
1664 self._changeid = None
1700 self._changeid = None
1665 self._filerev = self._filenode = None
1701 self._filerev = self._filenode = None
1666
1702
1667 if filelog is not None:
1703 if filelog is not None:
1668 self._filelog = filelog
1704 self._filelog = filelog
1669 if ctx:
1705 if ctx:
1670 self._changectx = ctx
1706 self._changectx = ctx
1671
1707
1672 def __nonzero__(self):
1708 def __nonzero__(self):
1673 return True
1709 return True
1674
1710
1675 def linkrev(self):
1711 def linkrev(self):
1676 # linked to self._changectx no matter if file is modified or not
1712 # linked to self._changectx no matter if file is modified or not
1677 return self.rev()
1713 return self.rev()
1678
1714
1679 def parents(self):
1715 def parents(self):
1680 '''return parent filectxs, following copies if necessary'''
1716 '''return parent filectxs, following copies if necessary'''
1681 def filenode(ctx, path):
1717 def filenode(ctx, path):
1682 return ctx._manifest.get(path, nullid)
1718 return ctx._manifest.get(path, nullid)
1683
1719
1684 path = self._path
1720 path = self._path
1685 fl = self._filelog
1721 fl = self._filelog
1686 pcl = self._changectx._parents
1722 pcl = self._changectx._parents
1687 renamed = self.renamed()
1723 renamed = self.renamed()
1688
1724
1689 if renamed:
1725 if renamed:
1690 pl = [renamed + (None,)]
1726 pl = [renamed + (None,)]
1691 else:
1727 else:
1692 pl = [(path, filenode(pcl[0], path), fl)]
1728 pl = [(path, filenode(pcl[0], path), fl)]
1693
1729
1694 for pc in pcl[1:]:
1730 for pc in pcl[1:]:
1695 pl.append((path, filenode(pc, path), fl))
1731 pl.append((path, filenode(pc, path), fl))
1696
1732
1697 return [self._parentfilectx(p, fileid=n, filelog=l)
1733 return [self._parentfilectx(p, fileid=n, filelog=l)
1698 for p, n, l in pl if n != nullid]
1734 for p, n, l in pl if n != nullid]
1699
1735
1700 def children(self):
1736 def children(self):
1701 return []
1737 return []
1702
1738
1703 class workingfilectx(committablefilectx):
1739 class workingfilectx(committablefilectx):
1704 """A workingfilectx object makes access to data related to a particular
1740 """A workingfilectx object makes access to data related to a particular
1705 file in the working directory convenient."""
1741 file in the working directory convenient."""
1706 def __init__(self, repo, path, filelog=None, workingctx=None):
1742 def __init__(self, repo, path, filelog=None, workingctx=None):
1707 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1743 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1708
1744
1709 @propertycache
1745 @propertycache
1710 def _changectx(self):
1746 def _changectx(self):
1711 return workingctx(self._repo)
1747 return workingctx(self._repo)
1712
1748
1713 def data(self):
1749 def data(self):
1714 return self._repo.wread(self._path)
1750 return self._repo.wread(self._path)
1715 def renamed(self):
1751 def renamed(self):
1716 rp = self._repo.dirstate.copied(self._path)
1752 rp = self._repo.dirstate.copied(self._path)
1717 if not rp:
1753 if not rp:
1718 return None
1754 return None
1719 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1755 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1720
1756
1721 def size(self):
1757 def size(self):
1722 return self._repo.wvfs.lstat(self._path).st_size
1758 return self._repo.wvfs.lstat(self._path).st_size
1723 def date(self):
1759 def date(self):
1724 t, tz = self._changectx.date()
1760 t, tz = self._changectx.date()
1725 try:
1761 try:
1726 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1762 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1727 except OSError as err:
1763 except OSError as err:
1728 if err.errno != errno.ENOENT:
1764 if err.errno != errno.ENOENT:
1729 raise
1765 raise
1730 return (t, tz)
1766 return (t, tz)
1731
1767
1732 def cmp(self, fctx):
1768 def cmp(self, fctx):
1733 """compare with other file context
1769 """compare with other file context
1734
1770
1735 returns True if different than fctx.
1771 returns True if different than fctx.
1736 """
1772 """
1737 # fctx should be a filectx (not a workingfilectx)
1773 # fctx should be a filectx (not a workingfilectx)
1738 # invert comparison to reuse the same code path
1774 # invert comparison to reuse the same code path
1739 return fctx.cmp(self)
1775 return fctx.cmp(self)
1740
1776
1741 def remove(self, ignoremissing=False):
1777 def remove(self, ignoremissing=False):
1742 """wraps unlink for a repo's working directory"""
1778 """wraps unlink for a repo's working directory"""
1743 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1779 util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)
1744
1780
1745 def write(self, data, flags):
1781 def write(self, data, flags):
1746 """wraps repo.wwrite"""
1782 """wraps repo.wwrite"""
1747 self._repo.wwrite(self._path, data, flags)
1783 self._repo.wwrite(self._path, data, flags)
1748
1784
1749 class workingcommitctx(workingctx):
1785 class workingcommitctx(workingctx):
1750 """A workingcommitctx object makes access to data related to
1786 """A workingcommitctx object makes access to data related to
1751 the revision being committed convenient.
1787 the revision being committed convenient.
1752
1788
1753 This hides changes in the working directory, if they aren't
1789 This hides changes in the working directory, if they aren't
1754 committed in this context.
1790 committed in this context.
1755 """
1791 """
1756 def __init__(self, repo, changes,
1792 def __init__(self, repo, changes,
1757 text="", user=None, date=None, extra=None):
1793 text="", user=None, date=None, extra=None):
1758 super(workingctx, self).__init__(repo, text, user, date, extra,
1794 super(workingctx, self).__init__(repo, text, user, date, extra,
1759 changes)
1795 changes)
1760
1796
1761 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1797 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1762 unknown=False):
1798 unknown=False):
1763 """Return matched files only in ``self._status``
1799 """Return matched files only in ``self._status``
1764
1800
1765 Uncommitted files appear "clean" via this context, even if
1801 Uncommitted files appear "clean" via this context, even if
1766 they aren't actually so in the working directory.
1802 they aren't actually so in the working directory.
1767 """
1803 """
1768 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1804 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1769 if clean:
1805 if clean:
1770 clean = [f for f in self._manifest if f not in self._changedset]
1806 clean = [f for f in self._manifest if f not in self._changedset]
1771 else:
1807 else:
1772 clean = []
1808 clean = []
1773 return scmutil.status([f for f in self._status.modified if match(f)],
1809 return scmutil.status([f for f in self._status.modified if match(f)],
1774 [f for f in self._status.added if match(f)],
1810 [f for f in self._status.added if match(f)],
1775 [f for f in self._status.removed if match(f)],
1811 [f for f in self._status.removed if match(f)],
1776 [], [], [], clean)
1812 [], [], [], clean)
1777
1813
1778 @propertycache
1814 @propertycache
1779 def _changedset(self):
1815 def _changedset(self):
1780 """Return the set of files changed in this context
1816 """Return the set of files changed in this context
1781 """
1817 """
1782 changed = set(self._status.modified)
1818 changed = set(self._status.modified)
1783 changed.update(self._status.added)
1819 changed.update(self._status.added)
1784 changed.update(self._status.removed)
1820 changed.update(self._status.removed)
1785 return changed
1821 return changed

def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx
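
# Illustrative sketch (assumed usage, not from the original file): wrapping a
# filectxfn so that repeated lookups of the same path reuse the first result,
# as in convert-style bulk imports. ``sourcerepo`` is a hypothetical object
# with a ``wread(path)`` method returning file data.
def _example_caching_filectxfn(sourcerepo):
    def fetch(repo, memctx, path):
        # potentially expensive lookup of the file data
        return memfilectx(repo, path, sourcerepo.wread(path), memctx=memctx)
    # pass the wrapped callable to memctx(..., filectxfn=...); ``fetch`` is
    # then invoked at most once per path
    return makecachingfilectxfn(fetch)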

class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while the
    related file data is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revision identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    the requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but the call order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn raises an
    IOError (newer code may return None instead; see the compatibility
    note below). Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to the current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to the current date,
    extra is a dictionary of metadata or is left empty.

    (See the illustrative usage sketch after this class definition.)
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # if store is not callable, wrap it in a function
        if not callable(filectxfn):
            def getfilectx(repo, memctx, path):
                fctx = filectxfn[path]
                # this is weird but apparently we only keep track of one parent
                # (why not only store that instead of a tuple?)
                copied = fctx.renamed()
                if copied:
                    copied = copied[0]
                return memfilectx(repo, path, fctx.data(),
                                  islink=fctx.islink(), isexec=fctx.isexec(),
                                  copied=copied, memctx=memctx)
            self._filectxfn = getfilectx
        else:
            # memoizing increases performance for e.g. vcs convert scenarios.
            self._filectxfn = makecachingfilectxfn(filectxfn)

        if extra:
            self._extra = extra.copy()
        else:
            self._extra = {}

        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
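
# Illustrative sketch (assumed usage, not from the original file): building a
# changeset entirely in memory with memctx. ``repo`` is assumed to be an open
# localrepo; the file names, contents and user are made up for the example.
def _example_memctx_commit(repo):
    def getfilectx(repo, memctx, path):
        if path == 'removed.txt':
            return None  # drop this file from the new revision
        return memfilectx(repo, path, 'hello\n', memctx=memctx)
    ctx = memctx(repo, parents=(repo['.'].node(), None),
                 text='in-memory commit',
                 files=['hello.txt', 'removed.txt'],
                 filectxfn=getfilectx,
                 user='someone <someone@example.com>')
    # committing writes the new changeset without touching the working copy
    return ctx.commit()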

class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.

    (See the illustrative usage sketch after this class definition.)
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if the current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data
    def size(self):
        return len(self.data())
    def flags(self):
        return self._flags
    def renamed(self):
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
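
# Illustrative sketch (assumed usage, not from the original file): the kinds
# of memfilectx a filectxfn might return, covering the executable and symlink
# flags and the copy metadata documented above. ``repo`` and ``memctx`` are
# assumed to be supplied by the commit machinery; the paths are made up.
def _example_memfilectx(repo, memctx):
    plain = memfilectx(repo, 'docs/readme.txt', 'some text\n', memctx=memctx)
    script = memfilectx(repo, 'bin/run.sh', '#!/bin/sh\n', isexec=True,
                        memctx=memctx)
    # for a symlink, ``data`` is the link target
    link = memfilectx(repo, 'current', 'docs/readme.txt', islink=True,
                      memctx=memctx)
    # a rename: mark the old path removed in 'files' and record the source here
    moved = memfilectx(repo, 'docs/new.txt', 'some text\n',
                       copied='docs/old.txt', memctx=memctx)
    return [plain, script, link, moved]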

class metadataonlyctx(committablectx):
    """Like memctx, but reusing the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is the original revision whose manifest we are
    reusing, 'parents' is a sequence of two parent revision identifiers (pass
    None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to the current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to the current date, extra is a dictionary
    of metadata or is left empty.

    (See the illustrative usage sketch after this class definition.)
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents, text, user=None, date=None,
                 extra=None, editor=False):
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        parents = [(p or nullid) for p in parents]
        p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestctx().node() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestctx().node() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if extra:
            self._extra = extra.copy()
        else:
            self._extra = {}

        if self._extra.get('branch', '') == '':
            self._extra['branch'] = 'default'

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @propertycache
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
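
# Illustrative sketch (assumed usage, not from the original file): rewriting
# only the metadata of an existing revision while reusing its manifest, e.g.
# to change the description. ``repo``, ``rev`` and ``newtext`` are assumed.
def _example_metadata_rewrite(repo, rev, newtext):
    old = repo[rev]
    new = metadataonlyctx(repo, old,
                          parents=(old.p1().node(), old.p2().node()),
                          text=newtext,
                          user=old.user(),
                          date=old.date(),
                          extra=old.extra())
    # the new changeset shares the old manifest, so no file data is rewritten
    return new.commit()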